repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
kennethgillen/ansible | lib/ansible/modules/system/runit.py | 42 | 9044 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
# Ansible module metadata: declares interface stability and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}

# This is a modification of @bcoca's `svc` module
# Module documentation rendered by ansible-doc (YAML inside a string).
DOCUMENTATION = '''
---
module: runit
author: "James Sumners (@jsumners)"
version_added: "2.3"
short_description: Manage runit services.
description:
    - Controls runit services on remote hosts using the sv utility.
options:
    name:
        required: true
        description:
            - Name of the service to manage.
    state:
        required: false
        choices: [ started, stopped, restarted, killed, reloaded, once ]
        description:
            - C(started)/C(stopped) are idempotent actions that will not run
              commands unless necessary. C(restarted) will always bounce the
              service (sv restart) and C(killed) will always bounce the service (sv force-stop).
              C(reloaded) will send a HUP (sv reload).
              C(once) will run a normally downed sv once (sv once), not really
              an idempotent operation.
    enabled:
        required: false
        choices: [ "yes", "no" ]
        description:
            - Whether the service is enabled or not, if disabled it also implies stopped.
    service_dir:
        required: false
        default: /var/service
        description:
            - directory runsv watches for services
    service_src:
        required: false
        default: /etc/sv
        description:
            - directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
# Example action to start sv dnscache, if not running
- sv:
name: dnscache
state: started
# Example action to stop sv dnscache, if running
- sv:
name: dnscache
state: stopped
# Example action to kill sv dnscache, in all cases
- sv:
name: dnscache
state: killed
# Example action to restart sv dnscache, in all cases
- sv:
name: dnscache
state: restarted
# Example action to reload sv dnscache, in all cases
- sv:
name: dnscache
state: reloaded
# Example using alt sv directory location
- sv:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import platform
import shlex
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
    """
    Main class that handles runit (sv) services; can be subclassed and
    overridden in case we want to use a 'derivative' like encore, s6, etc.
    """

    #def __new__(cls, *args, **kwargs):
    #    return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        # Extra directories to search for the `sv` binary (none by default).
        self.extra_paths = []
        # Attributes exposed to the caller via report().
        self.report_vars = ['state', 'enabled', 'svc_full', 'src_full',
                            'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
        # Path of the symlink supervised by runsvdir and of the service source.
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])

        # lexists: the service counts as enabled even if the link is dangling.
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.get_status()
        else:
            self.state = 'stopped'

    def enable(self):
        """Enable the service by symlinking its source into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError:
                e = get_exception()
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Force-stop the service and remove its symlink from service_dir."""
        # NOTE(review): force-stop is issued against the source directory
        # (src_full), not the supervised link (svc_full) -- presumably
        # equivalent for sv; confirm before changing.
        self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
        try:
            os.unlink(self.svc_full)
        except OSError:
            e = get_exception()
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))

    def get_status(self):
        """Run `sv status` and update state/pid/duration/full_state."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])

        if err is not None and err:
            # sv itself failed; record its error output as the state.
            self.full_state = self.state = err
        else:
            self.full_state = out

            # Example output: "run: /var/service/x: (pid 123) 45s"
            m = re.search(r'\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)

            m = re.search(r' (\d+)s', out)
            if m:
                self.duration = m.group(1)

            if re.search(r'run:', out):
                self.state = 'started'
            elif re.search(r'down:', out):
                self.state = 'stopped'
            else:
                self.state = 'unknown'
        return

    def started(self):
        return self.start()

    def start(self):
        return self.execute_command([self.svc_cmd, 'start', self.svc_full])

    def stopped(self):
        return self.stop()

    def stop(self):
        return self.execute_command([self.svc_cmd, 'stop', self.svc_full])

    def once(self):
        """Run a normally-down service once (sv once)."""
        return self.execute_command([self.svc_cmd, 'once', self.svc_full])

    def reloaded(self):
        return self.reload()

    def reload(self):
        """Send the service a HUP (sv reload)."""
        return self.execute_command([self.svc_cmd, 'reload', self.svc_full])

    def restarted(self):
        return self.restart()

    def restart(self):
        return self.execute_command([self.svc_cmd, 'restart', self.svc_full])

    def killed(self):
        return self.kill()

    def kill(self):
        return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])

    def execute_command(self, cmd):
        """Run `cmd` (an argument list) and return (rc, stdout, stderr)."""
        try:
            # Pass the argument list directly instead of ' '.join()-ing it:
            # joining and letting run_command re-split would break on any
            # service path containing whitespace.
            (rc, out, err) = self.module.run_command(cmd)
        except Exception:
            e = get_exception()
            self.module.fail_json(msg="failed to execute: %s" % str(e))
        return (rc, out, err)

    def report(self):
        """Refresh status and return a dict of the reportable attributes."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
# ===========================================
# Main control flow

def main():
    """Entry point: reconcile the requested enabled/state with the host."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
            enabled=dict(required=False, type='bool'),
            dist=dict(required=False, default='runit'),
            service_dir=dict(required=False, default='/var/service'),
            service_src=dict(required=False, default='/etc/sv'),
        ),
        supports_check_mode=True,
    )

    # Force a C locale so sv's output parses predictably.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    desired_state = module.params['state']
    desired_enabled = module.params['enabled']

    sv = Sv(module)
    changed = False
    orig_state = sv.report()

    # Reconcile the enable/disable symlink first.
    if desired_enabled is not None and desired_enabled != sv.enabled:
        changed = True
        if not module.check_mode:
            try:
                if desired_enabled:
                    sv.enable()
                else:
                    sv.disable()
            except (OSError, IOError):
                e = get_exception()
                module.fail_json(msg="Could not change service link: %s" % str(e))

    # Then drive the run state by dispatching to the matching Sv method.
    if desired_state is not None and desired_state != sv.state:
        changed = True
        if not module.check_mode:
            getattr(sv, desired_state)()

    module.exit_json(changed=changed, sv=sv.report())


if __name__ == '__main__':
    main()
| gpl-3.0 |
anchel/zookeeper | src/contrib/rest/src/python/zk_dump_tree.py | 130 | 3433 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt
import sys
import simplejson
import urllib2
from base64 import b64decode
# Module-level flags; overridden from the command line in the __main__ block.
printdata = False   # when True, print each znode's data as well as its name
fullpath = False    # when True, print full znode paths instead of basenames

def dump_node(url, depth):
    """Dump the node, then dump children recursively

    Arguments:
    - `url`: REST URL of the znode to dump
    - `depth`: current recursion depth; used only for output indentation
    """
    req = urllib2.urlopen(url)
    resp = simplejson.load(req)
    if 'Error' in resp:
        raise resp['Error']

    if fullpath:
        name = resp['path']
    else:
        # Show only the last path component, prefixed with '/'.
        name = '/' + resp['path'].split('/')[-1]

    # The REST server returns node data either base64-encoded or as UTF-8.
    data64 = resp.get('data64')
    dataUtf8 = resp.get('dataUtf8')
    if data64 and printdata:
        data = b64decode(data64)
        print '%(indent)s%(name)s = b64(%(data64)s) str(%(data)s)' % \
            {'indent':' '*2*depth, 'name':name, 'data64':data64, 'data':data}
    elif dataUtf8 and printdata:
        print '%(indent)s%(name)s = %(data)s' % \
            {'indent':' '*2*depth, 'name':name, 'data':dataUtf8}
    else:
        print '%(indent)s%(name)s' % {'indent':' '*2*depth, 'name':name}

    # Recurse into children using the child URI template from the server.
    req = urllib2.urlopen(resp['uri'] + '?view=children')
    resp = simplejson.load(req)
    for child in resp.get('children', []):
        dump_node(resp['child_uri_template']
                  .replace("{child}", urllib2.quote(child)),
                  depth + 1)
def zk_dump_tree(url, root):
    """Dump the tree starting at the root

    Arguments:
    - `url`: base URL of the REST server
    - `root`: znode path at which to start dumping
    """
    dump_node(url + '/znodes/v1' + root, 0)

def usage():
    """Print command-line usage help.
    """
    print 'Usage: zk_dump_tree.py [-h|--help -u|--url=url -d|--data -f|--fullpath -r|--root=root]'
    print '  where url is the url of the rest server, data is whether to'
    print '  to include node data on output, root is the znode root'
    print '  fullpath prints the full node path (useful for copy/paste)'
if __name__ == '__main__':
    try:
        opts, args = getopt.getopt(sys.argv[1:],
            "hu:dfr:", ["help", "url=", "data", "fullpath", "root="])
    except getopt.GetoptError, err:
        # print help information and exit:
        print str(err) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)

    # Defaults; overridden by the command-line options below.
    url = 'http://localhost:9998'
    root = '/'
    for o, a in opts:
        if o in ("-d", "--data"):
            printdata = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-u", "--url"):
            url = a
        elif o in ("-r", "--root"):
            root = a
        elif o in ("-f", "--fullpath"):
            fullpath = True
        else:
            assert False, "unhandled option"

    print 'Accessing REST server at ' + url
    zk_dump_tree(url, root)
| apache-2.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/django/contrib/gis/gdal/error.py | 466 | 1517 | """
This module houses the OGR & SRS Exception objects, and the
check_err() routine which checks the status code returned by
OGR methods.
"""
#### OGR & SRS Exceptions ####
class GDALException(Exception):
    "Base exception for GDAL-related errors."
    pass


class OGRException(Exception):
    "Raised for errors reported by the OGR library."
    pass


class SRSException(Exception):
    "Raised for errors in spatial reference system handling."
    pass


class OGRIndexError(OGRException, KeyError):
    """
    This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_feature' attribute set to true.  This ensures that
    django's templates proceed to use the next lookup type gracefully when
    an Exception is raised.  Fixes ticket #4740.
    """
    silent_variable_failure = True


#### OGR error checking codes and routine ####

# OGR Error Codes: maps each OGRERR status to the exception class and message.
OGRERR_DICT = {
    1: (OGRException, 'Not enough data.'),
    2: (OGRException, 'Not enough memory.'),
    3: (OGRException, 'Unsupported geometry type.'),
    4: (OGRException, 'Unsupported operation.'),
    5: (OGRException, 'Corrupt data.'),
    6: (OGRException, 'OGR failure.'),
    7: (SRSException, 'Unsupported SRS.'),
    8: (OGRException, 'Invalid handle.'),
}
OGRERR_NONE = 0


def check_err(code):
    "Checks the given OGRERR, and raises an exception where appropriate."
    if code == OGRERR_NONE:
        return
    exc_info = OGRERR_DICT.get(code)
    if exc_info is None:
        raise OGRException('Unknown error code: "%s"' % code)
    exc_class, msg = exc_info
    raise exc_class(msg)
| apache-2.0 |
vwvww/servo | tests/wpt/web-platform-tests/webdriver/tests/set_window_rect.py | 11 | 15091 | # META: timeout=long
import pytest
from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
from tests.support.fixtures import create_dialog
from tests.support.inline import inline
alert_doc = inline("<script>window.alert()</script>")


def set_window_rect(session, rect):
    # Thin wrapper over the raw Set Window Rect HTTP endpoint so that tests
    # can send malformed payloads the typed client API would reject.
    return session.transport.send("POST", "session/%s/window/rect" % session.session_id, rect)


# 10.7.2 Set Window Rect

def test_current_top_level_browsing_context_no_longer_open(session, create_window):
    """
    1. If the current top-level browsing context is no longer open,
    return error with error code no such window.
    """
    # Switch to a fresh window, then close it so the current context is gone.
    session.window_handle = create_window()
    session.close()

    response = set_window_rect(session, {})
    assert_error(response, "no such window")
def test_handle_prompt_dismiss():
    """TODO"""


def test_handle_prompt_accept(new_session, add_browser_capabilites):
    """
    2. Handle any user prompts and return its value if it is an error.

    [...]

    In order to handle any user prompts a remote end must take the
    following steps:

    [...]

    2. Perform the following substeps based on the current session's
    user prompt handler:

    [...]

    - accept state
    Accept the current user prompt.
    """
    # A fresh session is needed because unhandledPromptBehavior is fixed
    # at session creation time.
    _, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({"unhandledPromptBehavior": "accept"})}})
    original = session.window.rect

    # step 2
    create_dialog(session)("alert", text="dismiss #1", result_var="dismiss1")

    result = set_window_rect(session, {"x": original["x"],
                                       "y": original["y"]})
    assert result.status == 200
    assert_dialog_handled(session, "dismiss #1")

    create_dialog(session)("confirm", text="dismiss #2", result_var="dismiss2")

    result = set_window_rect(session, {"x": original["x"],
                                       "y": original["y"]})
    assert result.status == 200
    assert_dialog_handled(session, "dismiss #2")

    create_dialog(session)("prompt", text="dismiss #3", result_var="dismiss3")

    result = set_window_rect(session, {"x": original["x"],
                                       "y": original["y"]})
    assert_success(result)
    assert_dialog_handled(session, "dismiss #3")


def test_handle_prompt_dismiss_and_notify():
    """TODO"""


def test_handle_prompt_accept_and_notify():
    """TODO"""


def test_handle_prompt_ignore():
    """TODO"""


def test_handle_prompt_missing_value(session, create_dialog):
    """
    2. Handle any user prompts and return its value if it is an error.

    [...]

    In order to handle any user prompts a remote end must take the
    following steps:

    [...]

    2. Perform the following substeps based on the current session's
    user prompt handler:

    [...]

    - missing value default state
    1. Dismiss the current user prompt.
    2. Return error with error code unexpected alert open.
    """
    original = session.window.rect

    # step 2: with no prompt handler configured, every dialog type must
    # be dismissed and the command must fail with "unexpected alert open".
    create_dialog("alert", text="dismiss #1", result_var="dismiss1")

    result = set_window_rect(session, {"x": original["x"],
                                       "y": original["y"]})
    assert_error(result, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #1")

    create_dialog("confirm", text="dismiss #2", result_var="dismiss2")

    result = set_window_rect(session, {"x": original["x"],
                                       "y": original["y"]})
    assert_error(result, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #2")

    create_dialog("prompt", text="dismiss #3", result_var="dismiss3")

    result = set_window_rect(session, {"x": original["x"],
                                       "y": original["y"]})
    assert_error(result, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #3")
@pytest.mark.parametrize("rect", [
    {"width": "a"},
    {"height": "b"},
    {"width": "a", "height": "b"},
    {"x": "a"},
    {"y": "b"},
    {"x": "a", "y": "b"},
    {"width": "a", "height": "b", "x": "a", "y": "b"},

    {"width": True},
    {"height": False},
    {"width": True, "height": False},
    {"x": True},
    {"y": False},
    {"x": True, "y": False},
    {"width": True, "height": False, "x": True, "y": False},

    {"width": []},
    {"height": []},
    {"width": [], "height": []},
    {"x": []},
    {"y": []},
    {"x": [], "y": []},
    {"width": [], "height": [], "x": [], "y": []},

    {"height": {}},
    {"width": {}},
    {"height": {}, "width": {}},
    {"x": {}},
    {"y": {}},
    {"x": {}, "y": {}},
    {"width": {}, "height": {}, "x": {}, "y": {}},
])
def test_invalid_types(session, rect):
    """
    8. If width or height is neither null nor a Number from 0 to 2^31 -
    1, return error with error code invalid argument.

    9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
    return error with error code invalid argument.
    """
    # Strings, booleans, lists and objects must all be rejected for every member.
    response = set_window_rect(session, rect)
    assert_error(response, "invalid argument")


@pytest.mark.parametrize("rect", [
    {"width": -1},
    {"height": -2},
    {"width": -1, "height": -2},
])
def test_out_of_bounds(session, rect):
    """
    8. If width or height is neither null nor a Number from 0 to 2^31 -
    1, return error with error code invalid argument.

    9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
    return error with error code invalid argument.
    """
    # Negative sizes fall outside the allowed [0, 2^31 - 1] range.
    response = set_window_rect(session, rect)
    assert_error(response, "invalid argument")
def test_width_height_floats(session):
    """
    8. If width or height is neither null nor a Number from 0 to 2^31 -
    1, return error with error code invalid argument.
    """
    # Fractional sizes are legal; the remote end truncates them to integers.
    for payload, (expected_width, expected_height) in [
            ({"width": 500.5, "height": 420}, (500, 420)),
            ({"width": 500, "height": 450.5}, (500, 450))]:
        value = assert_success(set_window_rect(session, payload))
        assert value["width"] == expected_width
        assert value["height"] == expected_height


def test_x_y_floats(session):
    """
    9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
    return error with error code invalid argument.
    """
    # Fractional positions are likewise truncated to integers.
    for payload, (expected_x, expected_y) in [
            ({"x": 0.5, "y": 420}, (0, 420)),
            ({"x": 100, "y": 450.5}, (100, 450))]:
        value = assert_success(set_window_rect(session, payload))
        assert value["x"] == expected_x
        assert value["y"] == expected_y
@pytest.mark.parametrize("rect", [
    {},
    {"width": None},
    {"height": None},
    {"width": None, "height": None},
    {"x": None},
    {"y": None},
    {"x": None, "y": None},
    {"width": None, "x": None},
    {"width": None, "y": None},
    {"height": None, "x": None},
    # BUG FIX: this entry previously used a capital "Y", which the remote end
    # treats as an unknown extension key instead of the y coordinate, so the
    # intended height-null/y-null combination was never exercised.
    {"height": None, "y": None},
    {"width": None, "height": None, "x": None, "y": None},

    {"width": 200},
    {"height": 200},
    {"x": 200},
    {"y": 200},
    {"width": 200, "x": 200},
    {"height": 200, "x": 200},
    {"width": 200, "y": 200},
    {"height": 200, "y": 200},
])
def test_no_change(session, rect):
    """
    13. If width and height are not null:

    [...]

    14. If x and y are not null:

    [...]

    15. Return success with the JSON serialization of the current
    top-level browsing context's window rect.

    A payload where either member of a (width, height) or (x, y) pair is
    null/absent must leave the window rect unchanged.
    """
    original = session.window.rect
    response = set_window_rect(session, rect)
    assert_success(response, original)
def test_fully_exit_fullscreen(session):
    """
    10. Fully exit fullscreen.

    [...]

    To fully exit fullscreen a document document, run these steps:

    1. If document's fullscreen element is null, terminate these steps.

    2. Unfullscreen elements whose fullscreen flag is set, within
    document's top layer, except for document's fullscreen element.

    3. Exit fullscreen document.
    """
    session.window.fullscreen()
    assert session.execute_script("return window.fullScreen") is True

    # Setting the rect must drop the window out of fullscreen before
    # applying the new dimensions.
    response = set_window_rect(session, {"width": 400, "height": 400})
    value = assert_success(response)
    assert value["width"] == 400
    assert value["height"] == 400

    assert session.execute_script("return window.fullScreen") is False


def test_restore_from_minimized(session):
    """
    12. If the visibility state of the top-level browsing context's
    active document is hidden, restore the window.

    [...]

    To restore the window, given an operating system level window with
    an associated top-level browsing context, run implementation-specific
    steps to restore or unhide the window to the visible screen. Do not
    return from this operation until the visibility state of the top-level
    browsing context's active document has reached the visible state,
    or until the operation times out.
    """
    session.window.minimize()
    assert session.execute_script("return document.hidden") is True

    # Setting the rect must unhide a minimized window.
    response = set_window_rect(session, {"width": 450, "height": 450})
    value = assert_success(response)
    assert value["width"] == 450
    assert value["height"] == 450

    assert session.execute_script("return document.hidden") is False


def test_restore_from_maximized(session):
    """
    12. If the visibility state of the top-level browsing context's
    active document is hidden, restore the window.

    [...]

    To restore the window, given an operating system level window with
    an associated top-level browsing context, run implementation-specific
    steps to restore or unhide the window to the visible screen. Do not
    return from this operation until the visibility state of the top-level
    browsing context's active document has reached the visible state,
    or until the operation times out.
    """
    original_size = session.window.size
    session.window.maximize()
    assert session.window.size != original_size

    # Setting the rect must take the window out of the maximized state.
    response = set_window_rect(session, {"width": 400, "height": 400})
    value = assert_success(response)
    assert value["width"] == 400
    assert value["height"] == 400
def test_height_width(session):
    # Resize within the available screen area; position must be unchanged.
    original = session.window.rect
    max = session.execute_script("""
        return {
          width: window.screen.availWidth,
          height: window.screen.availHeight,
        }""")

    # step 12
    response = set_window_rect(session, {"width": max["width"] - 100,
                                         "height": max["height"] - 100})

    # step 14
    assert_success(response, {"x": original["x"],
                              "y": original["y"],
                              "width": max["width"] - 100,
                              "height": max["height"] - 100})


def test_height_width_larger_than_max(session):
    # Requests beyond the available screen size may be clamped, but the
    # result must never be smaller than the available dimensions.
    max = session.execute_script("""
        return {
          width: window.screen.availWidth,
          height: window.screen.availHeight,
        }""")

    # step 12
    response = set_window_rect(session, {"width": max["width"] + 100,
                                         "height": max["height"] + 100})

    # step 14
    rect = assert_success(response)
    assert rect["width"] >= max["width"]
    assert rect["height"] >= max["height"]


def test_height_width_as_current(session):
    # Resizing to the current size is a no-op that must still succeed.
    original = session.window.rect

    # step 12
    response = set_window_rect(session, {"width": original["width"],
                                         "height": original["height"]})

    # step 14
    assert_success(response, {"x": original["x"],
                              "y": original["y"],
                              "width": original["width"],
                              "height": original["height"]})


def test_x_y(session):
    # Move the window; size must be unchanged.
    original = session.window.rect

    # step 13
    response = set_window_rect(session, {"x": original["x"] + 10,
                                         "y": original["y"] + 10})

    # step 14
    assert_success(response, {"x": original["x"] + 10,
                              "y": original["y"] + 10,
                              "width": original["width"],
                              "height": original["height"]})


def test_negative_x_y(session):
    original = session.window.rect

    # step 13
    response = set_window_rect(session, {"x": - 8, "y": - 8})

    # step 14
    os = session.capabilities["platformName"]

    # certain WMs prohibit windows from being moved off-screen
    if os == "linux":
        rect = assert_success(response)
        assert rect["x"] <= 0
        assert rect["y"] <= 0
        assert rect["width"] == original["width"]
        assert rect["height"] == original["height"]

    # On macOS, windows can only be moved off the screen on the
    # horizontal axis.  The system menu bar also blocks windows from
    # being moved to (0,0).
    elif os == "darwin":
        assert_success(response, {"x": -8,
                                  "y": 23,
                                  "width": original["width"],
                                  "height": original["height"]})

    # It turns out that Windows is the only platform on which the
    # window can be reliably positioned off-screen.
    elif os == "windows_nt":
        assert_success(response, {"x": -8,
                                  "y": -8,
                                  "width": original["width"],
                                  "height": original["height"]})
def test_move_to_same_position(session):
    # Moving to the current position is a no-op that must still succeed.
    original_position = session.window.position
    position = session.window.position = original_position
    assert position == original_position


def test_move_to_same_x(session):
    original_x = session.window.position[0]
    position = session.window.position = (original_x, 345)
    assert position == (original_x, 345)


def test_move_to_same_y(session):
    original_y = session.window.position[1]
    position = session.window.position = (456, original_y)
    assert position == (456, original_y)


def test_resize_to_same_size(session):
    # Resizing to the current size is a no-op that must still succeed.
    original_size = session.window.size
    size = session.window.size = original_size
    assert size == original_size


def test_resize_to_same_width(session):
    original_width = session.window.size[0]
    size = session.window.size = (original_width, 345)
    assert size == (original_width, 345)


def test_resize_to_same_height(session):
    original_height = session.window.size[1]
    size = session.window.size = (456, original_height)
    assert size == (456, original_height)


def test_payload(session):
    # step 14: the response body must be a JSON object whose
    # width/height/x/y members are all integers.
    response = set_window_rect(session, {"x": 400, "y": 400})

    assert response.status == 200
    assert isinstance(response.body["value"], dict)

    value = response.body["value"]
    assert "width" in value
    assert "height" in value
    assert "x" in value
    assert "y" in value
    assert isinstance(value["width"], int)
    assert isinstance(value["height"], int)
    assert isinstance(value["x"], int)
    assert isinstance(value["y"], int)
zhjunlang/kbengine | kbe/res/scripts/common/Lib/distutils/tests/test_version.py | 165 | 2614 | """Tests for distutils.version."""
import unittest
from distutils.version import LooseVersion
from distutils.version import StrictVersion
from test.support import run_unittest
class VersionTestCase(unittest.TestCase):

    def test_prerelease(self):
        # StrictVersion splits "1.2.3a1" into a (1, 2, 3) tuple plus a
        # ('a', 1) prerelease marker.
        version = StrictVersion('1.2.3a1')
        self.assertEqual(version.version, (1, 2, 3))
        self.assertEqual(version.prerelease, ('a', 1))
        self.assertEqual(str(version), '1.2.3a1')

        # A trailing ".0" component is dropped from the string form.
        version = StrictVersion('1.2.0')
        self.assertEqual(str(version), '1.2')

    def test_cmp_strict(self):
        # (v1, v2, expected) triples; "expected" is either the _cmp result
        # or ValueError when a side is not a valid strict version string.
        versions = (('1.5.1', '1.5.2b2', -1),
                    ('161', '3.10a', ValueError),
                    ('8.02', '8.02', 0),
                    ('3.4j', '1996.07.12', ValueError),
                    ('3.2.pl0', '3.1.1.6', ValueError),
                    ('2g6', '11g', ValueError),
                    ('0.9', '2.2', -1),
                    ('1.2.1', '1.2', 1),
                    ('1.1', '1.2.2', -1),
                    ('1.2', '1.1', 1),
                    ('1.2.1', '1.2.2', -1),
                    ('1.2.2', '1.2', 1),
                    ('1.2', '1.2.2', -1),
                    ('0.4.0', '0.4', 0),
                    ('1.13++', '5.5.kw', ValueError))

        for v1, v2, wanted in versions:
            try:
                res = StrictVersion(v1)._cmp(StrictVersion(v2))
            except ValueError:
                if wanted is ValueError:
                    continue
                else:
                    raise AssertionError(("cmp(%s, %s) "
                                          "shouldn't raise ValueError")
                                         % (v1, v2))
            self.assertEqual(res, wanted,
                             'cmp(%s, %s) should be %s, got %s' %
                             (v1, v2, wanted, res))

    def test_cmp(self):
        # LooseVersion never raises; any two version strings are comparable.
        versions = (('1.5.1', '1.5.2b2', -1),
                    ('161', '3.10a', 1),
                    ('8.02', '8.02', 0),
                    ('3.4j', '1996.07.12', -1),
                    ('3.2.pl0', '3.1.1.6', 1),
                    ('2g6', '11g', -1),
                    ('0.960923', '2.2beta29', -1),
                    ('1.13++', '5.5.kw', -1))

        for v1, v2, wanted in versions:
            res = LooseVersion(v1)._cmp(LooseVersion(v2))
            self.assertEqual(res, wanted,
                             'cmp(%s, %s) should be %s, got %s' %
                             (v1, v2, wanted, res))


def test_suite():
    # Collect every test_* method on VersionTestCase into a suite.
    return unittest.makeSuite(VersionTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
| lgpl-3.0 |
open-austin/influence-texas | src/influencetx/tlo/scrapper/get_witnesses_for_bill.py | 1 | 4580 | import re
from bs4 import BeautifulSoup, SoupStrainer
import requests
'''
TODO:
 - Need to parse witnesses for senate_url
'''
def get_witnesses_for_bill(bill_id, session):
    """Fetch and parse the house witness list for a bill.

    ex: "HB 864" -> "https://capitol.texas.gov/tlodocs/86R/witlistbill/html/HB00864H.htm"

    Arguments:
    - `bill_id`: bill identifier such as "HB 864"
    - `session`: legislative session number, e.g. 86

    Returns the parsed witness sections (see parse_witness_list_html).
    Raises ValueError when `bill_id` does not look like "<type> <number>".
    """
    parsed_bill_id = re.search(r"(\w+)\s+(\d+)", bill_id)
    if parsed_bill_id is None:
        # Fail early with a clear message instead of an opaque
        # AttributeError from calling .group() on None below.
        raise ValueError("Unrecognized bill_id format: %r" % (bill_id,))
    bill_type = parsed_bill_id.group(1)
    bill_number = parsed_bill_id.group(2).zfill(5)

    url_prefix = f"https://capitol.texas.gov/tlodocs/{session}R/witlistbill/html/{bill_type}{bill_number}"
    house_url = f"{url_prefix}H.htm"
    senate_url = f"{url_prefix}S.htm"  # TODO: senate witness lists are not parsed yet

    res = requests.get(house_url)
    return parse_witness_list_html(res.content)
def parse_witness_list_html(witness_list_html):
    """Parse a TLO witness-list HTML page into a list of witness sections.

    Split into its own function to support unit testing.

    Returns a list of section dicts, each with a "name" and (for the
    non-header sections) a "witnesses" list of
    {"firstname", "lastname", "representing"} dicts.
    """
    content = BeautifulSoup(witness_list_html, "html.parser").find("p")

    # Make regex to check if text is the header of a new section.
    # ex: make_section_regex(0) > "^against|^on|^Registering, ...|^for|^against|^on"
    # Returns None if we are at the last section and there are no more
    # new sections to be discovered.
    def make_section_regex(current_section):
        return ("|").join([w["regex"] for w in witness_sections[(current_section+1):]]) or None

    # Check if we are at the start of a new section.
    def is_new_section(section_regex, text):
        try:
            return re.match(section_regex, text, re.IGNORECASE)
        except TypeError:
            # section_regex is None once the final section has been reached.
            return None

    # Check if we are at the end of a witness list page (a page number).
    def is_page_end(text):
        return re.match(r"^\d", text)

    # Advance current_section to the section whose header matched `text`.
    def get_new_section(text, current_section):
        for i, w in enumerate(witness_sections[(current_section+1):], start=(current_section+1)):
            if re.match(w["regex"], text, re.IGNORECASE):
                return i, make_section_regex(i)

    # Step to the next <p> block; text is None when the document is exhausted.
    def get_next_line(content):
        content = content.find_next('p')
        try:
            text = content.get_text(strip=True)
        except AttributeError:
            # find_next() returned None: no more <p> blocks.
            text = None
        return content, text

    # Witness lines look like: "Lastname, Firstname (Org1; Org2)".
    def get_witness_data(text, current_section):
        m = re.match(r"(.+),\s+(.+)\s+\((.+)\)", text)
        witness = {
            "firstname": m.group(2),
            "lastname": m.group(1),
            "representing": [x.strip() for x in m.group(3).split(";")]
        }
        witness_sections[current_section]["witnesses"].append(witness)

    witness_sections = [
        {
            "name": "for",
            "regex": r"^for(\s+)*:",
            "witnesses": []
        },
        {
            "name": "against",
            "regex": r"^against(\s+)*:",
            "witnesses": []
        },
        {
            "name": "on",
            "regex": r"^on(\s+)*:",
            "witnesses": []
        },
        {
            # Header separating testifying witnesses from registered-only ones.
            "name": "Registering, but not testifying",
            # BUG FIX: the original pattern used "(/s+)*" -- a literal "/s",
            # a typo for "(\s+)*" -- so a header with whitespace before the
            # colon would not have matched.
            "regex": r"^Registering,\s+but\s+not\s+testifying(\s+)*:"
        },
        {
            "name": "for_no_testify",
            "regex": r"^for(\s+)*:",
            "witnesses": []
        },
        {
            "name": "against_no_testify",
            "regex": r"^against(\s+)*:",
            "witnesses": []
        },
        {
            "name": "on_no_testify",
            "regex": r"^on(\s+)*:",
            "witnesses": []
        }
    ]

    current_section = -1
    section_regex = make_section_regex(current_section)
    content, text = get_next_line(content)
    while text:
        if is_new_section(section_regex, text):
            # At a new section header: update current_section and section_regex.
            current_section, section_regex = get_new_section(text, current_section)
        elif is_page_end(text):
            break
        elif current_section > -1:
            get_witness_data(text, current_section)
        content, text = get_next_line(content)

    # The regexes are an implementation detail; strip them from the result.
    for section in witness_sections:
        del section["regex"]
    return witness_sections
| gpl-2.0 |
jelugbo/hebs_master | common/djangoapps/util/db.py | 17 | 1707 | """
Utility functions related to databases.
"""
from functools import wraps
from django.db import connection, transaction
def commit_on_success_with_read_committed(func):  # pylint: disable=invalid-name
    """
    Decorator which executes the decorated function inside a transaction with isolation level set to READ COMMITTED.

    If the function returns a response the transaction is committed and if the function raises an exception the
    transaction is rolled back.

    Raises TransactionManagementError if there are already more than 1 level of transactions open.

    Note: This only works on MySQL.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):  # pylint: disable=missing-docstring
        if connection.vendor == 'mysql':
            # The isolation level cannot be changed while a transaction is in progress. So we close any existing one.
            if connection.transaction_state:
                if len(connection.transaction_state) == 1:
                    connection.commit()
                # We can commit all open transactions. But it does not seem like a good idea.
                elif len(connection.transaction_state) > 1:
                    raise transaction.TransactionManagementError('Cannot change isolation level. '
                                                                 'More than 1 level of nested transactions.')

            # This will set the transaction isolation level to READ COMMITTED for the next transaction.
            cursor = connection.cursor()
            cursor.execute("SET TRANSACTION ISOLATION LEVEL READ COMMITTED")

        with transaction.commit_on_success():
            return func(*args, **kwargs)

    return wrapper
| agpl-3.0 |
Petr-Kovalev/nupic-win32 | py/nupic/encoders/scalar.py | 1 | 24079 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
from nupic.encoders.base import Encoder, EncoderResult
from nupic.bindings.math import SM32, GetNTAReal
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
############################################################################
class ScalarEncoder(Encoder):
"""
A scalar encoder encodes a numeric (floating point) value into an array
of bits. The output is 0's except for a contiguous block of 1's. The
location of this contiguous block varies continuously with the input value.
The encoding is linear. If you want a nonlinear encoding, just transform
the scalar (e.g. by applying a logarithm function) before encoding.
It is not recommended to bin the data as a pre-processing step, e.g.
"1" = $0 - $.20, "2" = $.21-$0.80, "3" = $.81-$1.20, etc. as this
removes a lot of information and prevents nearby values from overlapping
in the output. Instead, use a continuous transformation that scales
the data (a piecewise transformation is fine).
Parameters:
-----------------------------------------------------------------------------
w -- The number of bits that are set to encode a single value - the
"width" of the output signal
restriction: w must be odd to avoid centering problems.
minval -- The minimum value of the input signal.
maxval -- The upper bound of the input signal
periodic -- If true, then the input value "wraps around" such that minval = maxval
For a periodic value, the input must be strictly less than maxval,
otherwise maxval is a true upper bound.
There are three mutually exclusive parameters that determine the overall size of
the output. Only one of these should be specified to the constructor:
n -- The number of bits in the output. Must be greater than or equal to w
radius -- Two inputs separated by more than the radius have non-overlapping
representations. Two inputs separated by less than the radius will
in general overlap in at least some of their bits. You can think
of this as the radius of the input.
resolution -- Two inputs separated by greater than the resolution are guaranteed
to have different representations.
Note: radius and resolution are specified w.r.t the input, not output. w is
specified w.r.t. the output.
Example:
day of week.
w = 3
Minval = 1 (Monday)
Maxval = 8 (Monday)
periodic = true
n = 14
[equivalently: radius = 1.5 or resolution = 0.5]
The following values would encode midnight -- the start of the day
monday (1) -> 11000000000001
tuesday(2) -> 01110000000000
wednesday(3) -> 00011100000000
...
sunday (7) -> 10000000000011
Since the resolution is 12 hours, we can also encode noon, as
monday noon -> 11100000000000
monday midnt-> 01110000000000
tuesday noon -> 00111000000000
etc.
It may not be natural to specify "n", especially with non-periodic
data. For example, consider encoding an input with a range of 1-10
(inclusive) using an output width of 5. If you specify resolution =
1, this means that inputs of 1 and 2 have different outputs, though
they overlap, but 1 and 1.5 might not have different outputs.
This leads to a 14-bit representation like this:
1 -> 11111000000000 (14 bits total)
2 -> 01111100000000
...
10-> 00000000011111
[resolution = 1; n=14; radius = 5]
You could specify resolution = 0.5, which gives
1 -> 11111000... (22 bits total)
1.5 -> 011111.....
2.0 -> 0011111....
[resolution = 0.5; n=22; radius=2.5]
You could specify radius = 1, which gives
1 -> 111110000000.... (50 bits total)
2 -> 000001111100....
3 -> 000000000011111...
...
10 -> .....000011111
[radius = 1; resolution = 0.2; n=50]
An N/M encoding can also be used to encode a binary value,
where we want more than one bit to represent each state.
For example, we could have: w = 5, minval = 0, maxval = 1,
radius = 1 (which is equivalent to n=10)
0 -> 1111100000
1 -> 0000011111
Implementation details:
--------------------------------------------------------------------------
range = maxval - minval
h = (w-1)/2 (half-width)
resolution = radius / w
n = w * range/radius (periodic)
n = w * range/radius + 2 * h (non-periodic)
"""
############################################################################
def __init__(self, w, minval, maxval, periodic=False, n=0, radius=0,
             resolution=0, name=None, verbosity=0, clipInput=False):
    """
    w -- number of bits to set in output
    minval -- minimum input value
    maxval -- maximum input value (input is strictly less if periodic == True)

    Exactly one of n, radius, resolution must be set. "0" is a special
    value that means "not set".
    n -- number of bits in the representation (must be > w)
    radius -- inputs separated by more than this distance will have non-overlapping
              representations
    resolution -- inputs separated by more than this distance will have different
              representations
    name -- an optional string which will become part of the description
    clipInput -- if true, non-periodic inputs smaller than minval or greater
              than maxval will be clipped to minval/maxval

    See class documentation for more information.
    """
    assert isinstance(w, int)
    self.encoders = None
    self.verbosity = verbosity
    self.w = w
    # w must be odd so the block of 1's has a well-defined center bit.
    if (w % 2 == 0):
        raise Exception("Width must be an odd number (%f)" % w)
    self.minval = minval
    self.maxval = maxval
    self.periodic = periodic
    self.clipInput = clipInput
    # Integer under Python 2; exact because w is odd, so (w - 1) is even.
    self.halfwidth = (w - 1) / 2
    # For non-periodic inputs, padding is the number of bits "outside" the range,
    # on each side. I.e. the representation of minval is centered on some bit, and
    # there are "padding" bits to the left of that centered bit; similarly with
    # bits to the right of the center bit of maxval
    if self.periodic:
        self.padding = 0
    else:
        self.padding = self.halfwidth
    if (minval is not None and maxval is not None):
        if (minval >= maxval):
            raise Exception("The encoder for %s is invalid. minval %s is greater than "
                            "or equal to maxval %s. minval must be strictly less "
                            "than maxval." % (name, minval, maxval))
        self.rangeInternal = float(self.maxval - self.minval)
    # There are three different ways of thinking about the representation. Handle
    # each case here.
    if n != 0:
        # Caller fixed the output width directly; radius/resolution are derived.
        assert radius == 0
        assert resolution == 0
        assert n > w
        self.n = n
        if (minval is not None and maxval is not None):
            if not self.periodic:
                self.resolution = float(self.rangeInternal) / (self.n - self.w)
            else:
                self.resolution = float(self.rangeInternal) / (self.n)
            self.radius = self.w * self.resolution
            if self.periodic:
                self.range = self.rangeInternal
            else:
                self.range = self.rangeInternal + self.resolution
    else:
        # Caller fixed radius or resolution; n is derived below.
        # NOTE(review): this branch reads self.rangeInternal, which is only set
        # when both minval and maxval are given — confirm callers guarantee that.
        if radius != 0:
            assert resolution == 0
            self.radius = radius
            self.resolution = float(self.radius) / w
        elif resolution != 0:
            self.resolution = resolution
            self.radius = self.resolution * self.w
        else:
            # NOTE(review): adjacent literals concatenate to
            # "...must be specified for aScalarEncoder" (missing space).
            raise Exception("One of n, radius, resolution must be specified for a"
                            "ScalarEncoder")
        if self.periodic:
            self.range = self.rangeInternal
        else:
            self.range = self.rangeInternal + self.resolution
        # n = w * range/radius (+ 2*padding when non-periodic); round up.
        nfloat = self.w * (self.range / self.radius) + 2 * self.padding
        self.n = int(math.ceil(nfloat))
    # nInternal represents the output area excluding the possible padding on each
    # side
    self.nInternal = self.n - 2 * self.padding
    # Our name
    if name is not None:
        self.name = name
    else:
        self.name = "[%s:%s]" % (self.minval, self.maxval)
    # This matrix is used for the topDownCompute. We build it the first time
    # topDownCompute is called
    self._topDownMappingM = None
    self._topDownValues = None
    # This list is created by getBucketValues() the first time it is called,
    # and re-created whenever our buckets would be re-arranged.
    self._bucketValues = None
############################################################################
def getDecoderOutputFieldTypes(self):
    """ [Encoder class virtual method override]

    The decoded output of a scalar encoder is always a single float field.
    """
    fieldTypes = (FieldMetaType.float, )
    return fieldTypes
############################################################################
def getWidth(self):
    """Return the total number of bits in the encoded output (self.n)."""
    return self.n
############################################################################
def recalcParams(self):
    """
    Recompute the derived encoding parameters (resolution, radius, range and
    the auto-generated name) after self.minval / self.maxval have changed,
    keeping self.n fixed.  Mirrors the n-given branch of __init__.
    """
    self.rangeInternal = float(self.maxval - self.minval)
    if not self.periodic:
        self.resolution = float(self.rangeInternal) / (self.n - self.w)
    else:
        self.resolution = float(self.rangeInternal) / (self.n)
    self.radius = self.w * self.resolution
    if self.periodic:
        self.range = self.rangeInternal
    else:
        self.range = self.rangeInternal + self.resolution
    # Bug fix: the original assigned the rebuilt "[min:max]" string to a dead
    # local variable ("name = ..."), so the encoder's name was never refreshed.
    # __init__ sets self.name from the same expression; do the same here.
    self.name = "[%s:%s]" % (self.minval, self.maxval)
############################################################################
def getDescription(self):
    """Describe this encoder's single sub-field as (name, bit offset 0)."""
    return [(self.name, 0)]
############################################################################
def _getFirstOnBit(self, input):
    """ Return the bit offset of the first bit to be set in the encoder output.
    For periodic encoders, this can be a negative number when the encoded output
    wraps around.

    Returns [None] for the missing-data sentinel; raises for out-of-range
    input unless clipInput is set (non-periodic only).
    """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
        return [None]
    else:
        if input < self.minval:
            # Don't clip periodic inputs. Out-of-range input is always an error
            if self.clipInput and not self.periodic:
                if self.verbosity > 0:
                    print "Clipped input %s=%.2f to minval %.2f" % (self.name, input,
                                                                    self.minval)
                input = self.minval
            else:
                raise Exception('input (%s) less than range (%s - %s)' %
                                (str(input), str(self.minval), str(self.maxval)))
        if self.periodic:
            # Don't clip periodic inputs. Out-of-range input is always an error
            # (periodic input must be strictly less than maxval).
            if input >= self.maxval:
                raise Exception('input (%s) greater than periodic range (%s - %s)' %
                                (str(input), str(self.minval), str(self.maxval)))
        else:
            if input > self.maxval:
                if self.clipInput:
                    if self.verbosity > 0:
                        print "Clipped input %s=%.2f to maxval %.2f" % (self.name, input,
                                                                        self.maxval)
                    input = self.maxval
                else:
                    raise Exception('input (%s) greater than range (%s - %s)' %
                                    (str(input), str(self.minval), str(self.maxval)))
        # Locate the center bit of the block of 1's for this input.
        if self.periodic:
            centerbin = int((input - self.minval) * self.nInternal / self.range) \
                        + self.padding
        else:
            # +resolution/2 rounds to the nearest bucket boundary.
            centerbin = int(((input - self.minval) + self.resolution/2) \
                            / self.resolution ) + self.padding
        # We use the first bit to be set in the encoded output as the bucket index
        minbin = centerbin - self.halfwidth
        return [minbin]
############################################################################
def getBucketIndices(self, input):
    """ See method description in base.py """
    # NaN is treated the same as the explicit missing-data sentinel.
    if type(input) is float and math.isnan(input):
        input = SENTINEL_VALUE_FOR_MISSING_DATA
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
        return [None]

    firstOnBit = self._getFirstOnBit(input)[0]

    if not self.periodic:
        # Non-periodic: the bucket index is the index of the left-most ON bit.
        return [firstOnBit]

    # Periodic: the bucket index is the index of the center bit, wrapped
    # into [0, n) when the representation wraps around.
    bucketIdx = firstOnBit + self.halfwidth
    if bucketIdx < 0:
        bucketIdx += self.n
    return [bucketIdx]
############################################################################
def encodeIntoArray(self, input, output, learn=True):
    """ See method description in base.py

    Writes w contiguous 1's (with wrap-around when periodic) into the first
    self.n entries of `output`; all-zero output encodes a missing value.
    """
    if type(input) is float and math.isnan(input):
        input = SENTINEL_VALUE_FOR_MISSING_DATA
    # Get the bucket index to use.  Despite the name, this is the offset of
    # the first ON bit (_getFirstOnBit returns minbin, which can be negative
    # for periodic encoders).
    bucketIdx = self._getFirstOnBit(input)[0]
    if bucketIdx is None:
        # None is returned for missing value
        output[0:self.n] = 0
    else:
        # The bucket index is the index of the first bit to set in the output
        output[:self.n] = 0
        minbin = bucketIdx
        maxbin = minbin + 2*self.halfwidth
        if self.periodic:
            # Handle the edges by computing wrap-around
            if maxbin >= self.n:
                bottombins = maxbin - self.n + 1
                output[:bottombins] = 1
                maxbin = self.n - 1
            if minbin < 0:
                topbins = -minbin
                output[self.n - topbins:self.n] = 1
                minbin = 0
        # After wrap-around handling both ends must lie inside the output.
        assert minbin >= 0
        assert maxbin < self.n
        # set the output (except for periodic wraparound)
        output[minbin:maxbin + 1] = 1
    # Debug the decode() method
    if self.verbosity >= 2:
        print
        print "input:", input
        print "range:", self.minval, "-", self.maxval
        print "n:", self.n, "w:", self.w, "resolution:", self.resolution, \
            "radius", self.radius, "periodic:", self.periodic
        print "output:",
        self.pprint(output)
        print "input desc:", self.decodedToStr(self.decode(output))
############################################################################
def decode(self, encoded, parentFieldName=''):
    """ See the function description in base.py

    Maps a (possibly noisy) encoded bit array back into input-space ranges.
    Returns ({fieldName: (ranges, description)}, [fieldName]), where ranges
    is a list of [min, max] pairs in input space.
    """
    # For now, we simply assume any top-down output greater than 0
    # is ON. Eventually, we will probably want to incorporate the strength
    # of each top-down output.
    tmpOutput = numpy.array(encoded[:self.n] > 0).astype(encoded.dtype)
    if not tmpOutput.any():
        return (dict(), [])
    # ------------------------------------------------------------------------
    # First, assume the input pool is not sampled 100%, and fill in the
    # "holes" in the encoded representation (which are likely to be present
    # if this is a coincidence that was learned by the SP).
    # Search for portions of the output that have "holes": patterns of the
    # form 1 0...0 1 with up to halfwidth-1 zeros get their gap filled in.
    maxZerosInARow = self.halfwidth
    for i in xrange(maxZerosInARow):
        searchStr = numpy.ones(i + 3, dtype=encoded.dtype)
        searchStr[1:-1] = 0
        subLen = len(searchStr)
        # Does this search string appear in the output?
        if self.periodic:
            # Search with wrap-around indexing.
            for j in xrange(self.n):
                outputIndices = numpy.arange(j, j + subLen)
                outputIndices %= self.n
                if numpy.array_equal(searchStr, tmpOutput[outputIndices]):
                    tmpOutput[outputIndices] = 1
        else:
            for j in xrange(self.n - subLen + 1):
                if numpy.array_equal(searchStr, tmpOutput[j:j + subLen]):
                    tmpOutput[j:j + subLen] = 1
    if self.verbosity >= 2:
        print "raw output:", encoded[:self.n]
        print "filtered output:", tmpOutput
    # ------------------------------------------------------------------------
    # Find each run of 1's.
    nz = tmpOutput.nonzero()[0]
    runs = []  # list of [startIdx, runLength] pairs
    run = [nz[0], 1]
    i = 1
    while (i < len(nz)):
        if nz[i] == run[0] + run[1]:
            run[1] += 1
        else:
            runs.append(run)
            run = [nz[i], 1]
        i += 1
    runs.append(run)
    # If we have a periodic encoder, merge the first and last run if they
    # both go all the way to the edges
    if self.periodic and len(runs) > 1:
        if runs[0][0] == 0 and runs[-1][0] + runs[-1][1] == self.n:
            runs[-1][1] += runs[0][1]
            runs = runs[1:]
    # ------------------------------------------------------------------------
    # Now, for each group of 1's, determine the "left" and "right" edges, where
    # the "left" edge is inset by halfwidth and the "right" edge is inset by
    # halfwidth.
    # For a group of width w or less, the "left" and "right" edge are both at
    # the center position of the group.
    ranges = []
    for run in runs:
        (start, runLen) = run
        if runLen <= self.w:
            left = right = start + runLen / 2
        else:
            left = start + self.halfwidth
            right = start + runLen - 1 - self.halfwidth
        # Convert to input space.
        if not self.periodic:
            inMin = (left - self.padding) * self.resolution + self.minval
            inMax = (right - self.padding) * self.resolution + self.minval
        else:
            inMin = (left - self.padding) * self.range / self.nInternal + self.minval
            inMax = (right - self.padding) * self.range / self.nInternal + self.minval
        # Handle wrap-around if periodic
        if self.periodic:
            if inMin >= self.maxval:
                inMin -= self.range
                inMax -= self.range
        # Clip low end
        if inMin < self.minval:
            inMin = self.minval
        if inMax < self.minval:
            inMax = self.minval
        # If we have a periodic encoder, and the max is past the edge, break into
        # 2 separate ranges
        if self.periodic and inMax >= self.maxval:
            ranges.append([inMin, self.maxval])
            ranges.append([self.minval, inMax - self.range])
        else:
            # Clip high end for non-wrapping ranges.
            if inMax > self.maxval:
                inMax = self.maxval
            if inMin > self.maxval:
                inMin = self.maxval
            ranges.append([inMin, inMax])
    desc = self._generateRangeDescription(ranges)
    # Return result
    if parentFieldName != '':
        fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
        fieldName = self.name
    return ({fieldName: (ranges, desc)}, [fieldName])
def _generateRangeDescription(self, ranges):
    """Build a comma-separated text description of a list of [min, max] ranges.

    A degenerate range (min == max) is rendered as a single number.
    """
    pieces = []
    for lo, hi in ranges:
        if lo != hi:
            pieces.append("%.2f-%.2f" % (lo, hi))
        else:
            pieces.append("%.2f" % (lo))
    return ", ".join(pieces)
############################################################################
def _getTopDownMapping(self):
    """ Return the interal _topDownMappingM matrix used for handling the
    bucketInfo() and topDownCompute() methods. This is a matrix, one row per
    category (bucket) where each row contains the encoded output for that
    category.

    Built lazily on first call and cached in self._topDownMappingM.
    """
    # Do we need to build up our reverse mapping table?
    if self._topDownMappingM is None:
        # The input scalar value corresponding to each possible output encoding
        if self.periodic:
            # Bucket centers: offset by resolution/2, strictly below maxval.
            self._topDownValues = numpy.arange(self.minval + self.resolution / 2,
                                               self.maxval,
                                               self.resolution)
        else:
            # Number of values is (max-min)/resolution; endpoint included via
            # the +resolution/2 slack.
            self._topDownValues = numpy.arange(self.minval,
                                               self.maxval + self.resolution / 2 ,
                                               self.resolution)
        # Each row represents an encoded output pattern
        numCategories = len(self._topDownValues)
        self._topDownMappingM = SM32(numCategories, self.n)
        outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
        for i in xrange(numCategories):
            value = self._topDownValues[i]
            # Clamp into [minval, maxval] before encoding.
            value = max(value, self.minval)
            value = min(value, self.maxval)
            self.encodeIntoArray(value, outputSpace, learn=False)
            self._topDownMappingM.setRowFromDense(i, outputSpace)
    return self._topDownMappingM
############################################################################
def getBucketValues(self):
    """ See the function description in base.py """
    # Lazily build (and cache) the scalar value for every bucket.
    if self._bucketValues is None:
        mapping = self._getTopDownMapping()
        self._bucketValues = [
            self.getBucketInfo([bucketIdx])[0].value
            for bucketIdx in range(mapping.nRows())
        ]
    return self._bucketValues
############################################################################
def getBucketInfo(self, buckets):
    """ See the function description in base.py """
    # Ensure the top-down mapping matrix exists.
    # NOTE: although the return value is unused, some (bad-style) side effects
    # are executed during _getTopDownMapping(), so this call must stay here.
    self._getTopDownMapping()

    # The "category" is simply the bucket index.
    bucketIdx = buckets[0]
    encoding = self._topDownMappingM.getRow(bucketIdx)

    # Map the bucket index back to an input-space value.
    if self.periodic:
        inputVal = self.minval + self.resolution/2 + bucketIdx * self.resolution
    else:
        inputVal = self.minval + bucketIdx * self.resolution

    return [EncoderResult(value=inputVal, scalar=inputVal, encoding=encoding)]
############################################################################
def topDownCompute(self, encoded):
    """ See the function description in base.py """
    # Get/generate the top-down mapping table, then pick the bucket whose
    # stored encoding overlaps `encoded` the most.
    mapping = self._getTopDownMapping()
    bestBucket = mapping.rightVecProd(encoded).argmax()
    return self.getBucketInfo([bestBucket])
############################################################################
def closenessScores(self, expValues, actValues, fractional=True):
    """ See the function description in base.py """
    expected = expValues[0]
    actual = actValues[0]

    if self.periodic:
        # Compare within one period.
        expected = expected % self.maxval
        actual = actual % self.maxval

    err = abs(expected - actual)
    if self.periodic:
        # Take the shorter way around the circle.
        err = min(err, self.maxval - err)

    if fractional:
        pctErr = min(1.0, float(err) / (self.maxval - self.minval))
        closeness = 1.0 - pctErr
    else:
        closeness = err

    return numpy.array([closeness])
############################################################################
def dump(self):
    # Debug helper: print all derived encoder parameters to stdout
    # (Python 2 print statements).
    print "ScalarEncoder:"
    print "  min: %f" % self.minval
    print "  max: %f" % self.maxval
    print "  w:   %d" % self.w
    print "  n:   %d" % self.n
    print "  resolution: %f" % self.resolution
    print "  radius:     %f" % self.radius
    print "  periodic:   %s" % self.periodic
    print "  nInternal: %d" % self.nInternal
    print "  rangeInternal: %f" % self.rangeInternal
    print "  padding: %d" % self.padding
| gpl-3.0 |
switchboardOp/ansible | contrib/inventory/docker.py | 54 | 33549 | #!/usr/bin/env python
#
# (c) 2016 Paul Durivage <paul.durivage@gmail.com>
# Chris Houseknecht <house@redhat.com>
# James Tanner <jtanner@redhat.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
Docker Inventory Script
=======================
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
because the inventory is generated at run-time rather than being read from a static file. The script generates the
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
script contacts can be defined using environment variables or a configuration file.
Requirements
------------
Using the docker modules requires having docker-py <https://docker-py.readthedocs.org/en/stable/>
installed on the host running Ansible. To install docker-py:
pip install docker-py
Run for Specific Host
---------------------
When run for a specific container using the --host option this script returns the following hostvars:
{
"ansible_ssh_host": "",
"ansible_ssh_port": 0,
"docker_apparmorprofile": "",
"docker_args": [],
"docker_config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/hello"
],
"Domainname": "",
"Entrypoint": null,
"Env": null,
"Hostname": "9f2f80b0a702",
"Image": "hello-world",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": null,
"WorkingDir": ""
},
"docker_created": "2016-04-18T02:05:59.659599249Z",
"docker_driver": "aufs",
"docker_execdriver": "native-0.2",
"docker_execids": null,
"docker_graphdriver": {
"Data": null,
"Name": "aufs"
},
"docker_hostconfig": {
"Binds": null,
"BlkioWeight": 0,
"CapAdd": null,
"CapDrop": null,
"CgroupParent": "",
"ConsoleSize": [
0,
0
],
"ContainerIDFile": "",
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuShares": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": null,
"Dns": null,
"DnsOptions": null,
"DnsSearch": null,
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"KernelMemory": 0,
"Links": null,
"LogConfig": {
"Config": {},
"Type": "json-file"
},
"LxcConf": null,
"Memory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"NetworkMode": "default",
"OomKillDisable": false,
"PidMode": "host",
"PortBindings": null,
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"RestartPolicy": {
"MaximumRetryCount": 0,
"Name": ""
},
"SecurityOpt": [
"label:disable"
],
"UTSMode": "",
"Ulimits": null,
"VolumeDriver": "",
"VolumesFrom": null
},
"docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname",
"docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts",
"docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14",
"docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7",
"docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log",
"docker_mountlabel": "",
"docker_mounts": [],
"docker_name": "/hello-world",
"docker_networksettings": {
"Bridge": "",
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"HairpinMode": false,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"MacAddress": "",
"Networks": {
"bridge": {
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": ""
}
},
"Ports": null,
"SandboxID": "",
"SandboxKey": "",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null
},
"docker_path": "/hello",
"docker_processlabel": "",
"docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf",
"docker_restartcount": 0,
"docker_short_id": "9f2f80b0a7023",
"docker_state": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2016-04-18T02:06:00.296619369Z",
"OOMKilled": false,
"Paused": false,
"Pid": 0,
"Restarting": false,
"Running": false,
"StartedAt": "2016-04-18T02:06:00.272065041Z",
"Status": "exited"
}
}
Groups
------
When run in --list mode (the default), container instances are grouped by:
- container id
- container name
- container short id
- image_name (image_<image name>)
- docker_host
- running
- stopped
Configuration:
--------------
You can control the behavior of the inventory script by passing arguments, defining environment variables, or
creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
is command line args, then the docker.yml file and finally environment variables.
Environment variables:
......................
To connect to a single Docker API the following variables can be defined in the environment to control the connection
options. These are the same environment variables used by the Docker modules.
DOCKER_HOST
The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
DOCKER_API_VERSION:
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
DOCKER_TIMEOUT:
The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
DOCKER_TLS:
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
Defaults to False.
DOCKER_TLS_VERIFY:
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
Default is False
DOCKER_TLS_HOSTNAME:
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
to localhost.
DOCKER_CERT_PATH:
Path to the directory containing the client certificate, client key and CA certificate.
DOCKER_SSL_VERSION:
Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
was 1.0
In addition to the connection variables there are a couple variables used to control the execution and output of the
script:
DOCKER_CONFIG_FILE
Path to the configuration file. Defaults to ./docker.yml.
DOCKER_PRIVATE_SSH_PORT:
The private port (container port) on which SSH is listening for connections. Defaults to 22.
DOCKER_DEFAULT_IP:
The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
Configuration File
..................
Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
The default name of the file is derived from the name of the inventory script. By default the script will look for
basename of the script (i.e. docker) with an extension of '.yml'.
You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment.
Here's what you can define in docker_inventory.yml:
defaults
Defines a default connection. Defaults will be taken from this and applied to any values not provided
for a host defined in the hosts list.
hosts
If you wish to get inventory from more than one Docker host, define a hosts list.
For the default host and each host in the hosts list define the following attributes:
host:
description: The URL or Unix socket path used to connect to the Docker API.
required: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
default: false
required: false
tls_verify:
description: Connect using TLS and verify the authenticity of the Docker host server.
default: false
required: false
cert_path:
description: Path to the client's TLS certificate file.
default: null
required: false
cacert_path:
description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
default: null
required: false
key_path:
description: Path to the client's TLS key file.
default: null
required: false
version:
description: The Docker API version.
required: false
default: will be supplied by the docker-py module.
timeout:
description: The amount of time in seconds to wait on an API response.
required: false
default: 60
default_ip:
description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
'0.0.0.0'.
required: false
default: 127.0.0.1
private_ssh_port:
description: The port containers use for SSH
required: false
default: 22
Examples
--------
# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
# Simple playbook to invoke with the above example:
- name: Test docker_inventory
hosts: all
connection: local
gather_facts: no
tasks:
- debug: msg="Container - {{ inventory_hostname }}"
'''
import os
import sys
import json
import argparse
import re
import yaml
from collections import defaultdict
# Manipulation of the path is needed because the docker-py
# module is imported by the name docker, and because this file
# is also named docker
for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
    try:
        # list.index() raises ValueError when the entry is absent; catch only
        # that (the original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit).
        del sys.path[sys.path.index(path)]
    except ValueError:
        pass
# Import docker-py optimistically; on failure record both the availability
# flag and the error message so the script can report a useful diagnostic
# later instead of crashing at import time.
HAS_DOCKER_PY = True
HAS_DOCKER_ERROR = False
try:
    from docker import Client
    from docker.errors import APIError, TLSParameterError
    from docker.tls import TLSConfig
    from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
except ImportError as exc:
    # Keep the message (truthy string) for later reporting.
    HAS_DOCKER_ERROR = str(exc)
    HAS_DOCKER_PY = False
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_IP = '127.0.0.1'
DEFAULT_SSH_PORT = '22'
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
DOCKER_ENV_ARGS = dict(
config_file='DOCKER_CONFIG_FILE',
docker_host='DOCKER_HOST',
api_version='DOCKER_API_VERSION',
cert_path='DOCKER_CERT_PATH',
ssl_version='DOCKER_SSL_VERSION',
tls='DOCKER_TLS',
tls_verify='DOCKER_TLS_VERIFY',
timeout='DOCKER_TIMEOUT',
private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
default_ip='DOCKER_DEFAULT_IP',
)
def fail(msg):
    """Report a fatal error on stderr and terminate with exit status 1."""
    formatted = "%s\n" % msg
    sys.stderr.write(formatted)
    sys.exit(1)
def log(msg, pretty_print=False):
    """Print a debug message to stdout.

    :param msg: object to display
    :param pretty_print: when True, render *msg* as key-sorted, indented
        JSON (useful for dicts/lists); otherwise print it as text followed
        by a blank line.
    """
    if pretty_print:
        print(json.dumps(msg, sort_keys=True, indent=2))
    else:
        # %s-formatting accepts any object; the original `msg + u'\n'`
        # concatenation raised TypeError when msg was not a string.
        print(u'%s\n' % msg)
class AnsibleDockerClient(Client):
    """docker-py Client that derives its connection/TLS keyword arguments
    from a host dict produced by DockerInventory.get_hosts().

    Any connection or TLS-parameter error is fatal: it is reported via the
    module-level fail(), which exits the process.
    """

    def __init__(self, auth_params, debug):
        # auth_params: dict with keys docker_host, api_version, tls,
        # tls_verify, tls_hostname, cert_path, cacert_path, key_path,
        # ssl_version and timeout (see DockerInventory.get_hosts()).
        self.auth_params = auth_params
        self.debug = debug
        self._connect_params = self._get_connect_params()
        try:
            super(AnsibleDockerClient, self).__init__(**self._connect_params)
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

    def fail(self, msg):
        # Delegates to the module-level fail() (stderr + sys.exit(1)).
        fail(msg)

    def log(self, msg, pretty_print=False):
        # Only emit debug output when --debug was requested.
        if self.debug:
            log(msg, pretty_print)

    def _get_tls_config(self, **kwargs):
        """Build a docker.tls.TLSConfig from *kwargs*, exiting fatally on
        an invalid parameter combination."""
        self.log("get_tls_config:")
        for key in kwargs:
            self.log(" %s: %s" % (key, kwargs[key]))
        try:
            tls_config = TLSConfig(**kwargs)
            return tls_config
        except TLSParameterError as exc:
            self.fail("TLS config error: %s" % exc)

    def _get_connect_params(self):
        """Translate self.auth_params into docker-py Client() kwargs.

        The branches below are evaluated in order and cover: TLS without
        host verification (with client certs, then without), TLS with host
        verification (client certs with/without CA cert, CA cert only,
        neither), and finally plain unencrypted connections.
        """
        auth = self.auth_params
        self.log("auth params:")
        for key in auth:
            self.log(" %s: %s" % (key, auth[key]))
        if auth['tls'] or auth['tls_verify']:
            # docker-py expects an https:// URL whenever TLS is in play.
            auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
        if auth['tls'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and no host verification
            tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                              verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])
        if auth['tls']:
            # TLS with no certs and not host verification
            tls_config = self._get_tls_config(verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])
        if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and host verification
            if auth['cacert_path']:
                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                                  ca_cert=auth['cacert_path'],
                                                  verify=True,
                                                  assert_hostname=auth['tls_hostname'],
                                                  ssl_version=auth['ssl_version'])
            else:
                tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
                                                  verify=True,
                                                  assert_hostname=auth['tls_hostname'],
                                                  ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])
        if auth['tls_verify'] and auth['cacert_path']:
            # TLS with cacert only
            tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
                                              assert_hostname=auth['tls_hostname'],
                                              verify=True,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])
        if auth['tls_verify']:
            # TLS with verify and no certs
            tls_config = self._get_tls_config(verify=True,
                                              assert_hostname=auth['tls_hostname'],
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])
        # No TLS
        return dict(base_url=auth['docker_host'],
                    version=auth['api_version'],
                    timeout=auth['timeout'])

    def _handle_ssl_error(self, error):
        """Turn an SSL hostname-mismatch error into an actionable fatal
        message; any other SSL error is reported verbatim."""
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
                  "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
                  "You may also use TLS without verification by setting the tls parameter to true." \
                  % (self.auth_params['tls_hostname'], match.group(1), match.group(1))
            self.fail(msg)
        self.fail("SSL Exception: %s" % (error))
class EnvArgs(object):
    """Container for configuration read from environment variables.

    Every attribute starts out as None, meaning "not supplied"; values are
    filled in afterwards by DockerInventory._parse_env_args().
    """

    def __init__(self):
        for attr in ('config_file', 'docker_host', 'api_version',
                     'cert_path', 'ssl_version', 'tls', 'tls_verify',
                     'tls_hostname', 'timeout', 'default_ssh_port',
                     'default_ip'):
            setattr(self, attr, None)
class DockerInventory(object):
    """Dynamic-inventory driver: reads configuration (config file, CLI,
    environment), queries each configured Docker daemon for containers and
    prints Ansible inventory JSON on stdout."""

    def __init__(self):
        self._args = self._parse_cli_args()
        self._env_args = self._parse_env_args()
        # group name -> list of container names
        self.groups = defaultdict(list)
        # container name -> dict of host vars
        self.hostvars = defaultdict(dict)

    def run(self):
        """Collect inventory from every configured Docker host and print
        it (full inventory, or just one host's vars with --host)."""
        config_from_file = self._parse_config_file()
        if not config_from_file:
            config_from_file = dict()
        docker_hosts = self.get_hosts(config_from_file)
        for host in docker_hosts:
            client = AnsibleDockerClient(host, self._args.debug)
            self.get_inventory(client, host)
        if not self._args.host:
            self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
            self.groups['_meta'] = dict(
                hostvars=self.hostvars
            )
            print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
        else:
            print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))
        sys.exit(0)

    def get_inventory(self, client, host):
        """Populate self.groups / self.hostvars with the containers found
        on one Docker daemon.

        Containers are grouped by image, full id, name, short id, daemon
        host, and running/stopped state. SSH connection vars are derived
        from the public mapping of the container's private SSH port.
        """
        ssh_port = host.get('default_ssh_port')
        default_ip = host.get('default_ip')
        hostname = host.get('docker_host')
        try:
            containers = client.containers(all=True)
        except Exception as exc:
            self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
        for container in containers:
            id = container.get('Id')
            short_id = id[:13]
            try:
                # First name in the Names list, without the leading '/'.
                name = container.get('Names', list()).pop(0).lstrip('/')
            except IndexError:
                name = short_id
            if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
                try:
                    inspect = client.inspect_container(id)
                except Exception as exc:
                    self.fail("Error inspecting container %s - %s" % (name, str(exc)))
                running = inspect.get('State', dict()).get('Running')
                # Add container to groups
                image_name = inspect.get('Config', dict()).get('Image')
                if image_name:
                    self.groups["image_%s" % (image_name)].append(name)
                self.groups[id].append(name)
                self.groups[name].append(name)
                if short_id not in self.groups:
                    self.groups[short_id].append(name)
                self.groups[hostname].append(name)
                if running is True:
                    self.groups['running'].append(name)
                else:
                    self.groups['stopped'].append(name)
                # Figure out ssh IP and Port
                try:
                    # Lookup the public facing port Nat'ed to ssh port.
                    port = client.port(container, ssh_port)[0]
                except (IndexError, AttributeError, TypeError):
                    port = dict()
                try:
                    # A 0.0.0.0 binding means "all interfaces"; substitute
                    # the configured default IP so SSH has a real target.
                    ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
                except KeyError:
                    ip = ''
                facts = dict(
                    ansible_ssh_host=ip,
                    ansible_ssh_port=port.get('HostPort', int()),
                    docker_name=name,
                    docker_short_id=short_id
                )
                # Expose every top-level inspect key as a docker_* fact.
                for key in inspect:
                    fact_key = self._slugify(key)
                    facts[fact_key] = inspect.get(key)
                self.hostvars[name].update(facts)

    def _slugify(self, value):
        """Turn an inspect key into a safe fact name: docker_<lowercased,
        non-word characters replaced by underscores>."""
        return 'docker_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))

    def get_hosts(self, config):
        '''
        Determine the list of docker hosts we need to talk to.

        Value precedence for every setting is: per-host config entry >
        config-file defaults > CLI option > environment variable >
        built-in default.

        :param config: dictionary read from config file. can be empty.
        :return: list of connection dictionaries
        '''
        hosts = list()
        hosts_list = config.get('hosts')
        defaults = config.get('defaults', dict())
        self.log('defaults:')
        self.log(defaults, pretty_print=True)
        def_host = defaults.get('host')
        def_tls = defaults.get('tls')
        def_tls_verify = defaults.get('tls_verify')
        def_tls_hostname = defaults.get('tls_hostname')
        def_ssl_version = defaults.get('ssl_version')
        def_cert_path = defaults.get('cert_path')
        def_cacert_path = defaults.get('cacert_path')
        def_key_path = defaults.get('key_path')
        def_version = defaults.get('version')
        def_timeout = defaults.get('timeout')
        def_ip = defaults.get('default_ip')
        def_ssh_port = defaults.get('private_ssh_port')
        if hosts_list:
            # use hosts from config file
            for host in hosts_list:
                docker_host = host.get('host') or def_host or self._args.docker_host or \
                    self._env_args.docker_host or DEFAULT_DOCKER_HOST
                api_version = host.get('version') or def_version or self._args.api_version or \
                    self._env_args.api_version or DEFAULT_DOCKER_API_VERSION
                tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \
                    self._env_args.tls_hostname
                tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \
                    self._env_args.tls_verify or DEFAULT_TLS_VERIFY
                tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
                ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \
                    self._env_args.ssl_version
                cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \
                    self._env_args.cert_path
                # DOCKER_CERT_PATH is a *directory*; append the standard
                # file name when the value came from the environment.
                if cert_path and cert_path == self._env_args.cert_path:
                    cert_path = os.path.join(cert_path, 'cert.pem')
                # NOTE(review): the environment fall-back here is
                # _env_args.cert_path (the DOCKER_CERT_PATH directory), not a
                # separate cacert variable — presumably intentional, since
                # ca.pem lives in that same directory.
                cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \
                    self._env_args.cert_path
                if cacert_path and cacert_path == self._env_args.cert_path:
                    cacert_path = os.path.join(cacert_path, 'ca.pem')
                key_path = host.get('key_path') or def_key_path or self._args.key_path or \
                    self._env_args.cert_path
                if key_path and key_path == self._env_args.cert_path:
                    key_path = os.path.join(key_path, 'key.pem')
                timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \
                    DEFAULT_TIMEOUT_SECONDS
                default_ip = host.get('default_ip') or def_ip or self._args.default_ip_address or \
                    DEFAULT_IP
                default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \
                    DEFAULT_SSH_PORT
                host_dict = dict(
                    docker_host=docker_host,
                    api_version=api_version,
                    tls=tls,
                    tls_verify=tls_verify,
                    tls_hostname=tls_hostname,
                    cert_path=cert_path,
                    cacert_path=cacert_path,
                    key_path=key_path,
                    ssl_version=ssl_version,
                    timeout=timeout,
                    default_ip=default_ip,
                    default_ssh_port=default_ssh_port,
                )
                hosts.append(host_dict)
        else:
            # use default definition
            docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST
            api_version = def_version or self._args.api_version or self._env_args.api_version or \
                DEFAULT_DOCKER_API_VERSION
            tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname
            tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY
            tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
            ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version
            cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
            if cert_path and cert_path == self._env_args.cert_path:
                cert_path = os.path.join(cert_path, 'cert.pem')
            cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
            if cacert_path and cacert_path == self._env_args.cert_path:
                cacert_path = os.path.join(cacert_path, 'ca.pem')
            key_path = def_key_path or self._args.key_path or self._env_args.cert_path
            if key_path and key_path == self._env_args.cert_path:
                key_path = os.path.join(key_path, 'key.pem')
            timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
            default_ip = def_ip or self._args.default_ip_address or DEFAULT_IP
            default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT
            host_dict = dict(
                docker_host=docker_host,
                api_version=api_version,
                tls=tls,
                tls_verify=tls_verify,
                tls_hostname=tls_hostname,
                cert_path=cert_path,
                cacert_path=cacert_path,
                key_path=key_path,
                ssl_version=ssl_version,
                timeout=timeout,
                default_ip=default_ip,
                default_ssh_port=default_ssh_port,
            )
            hosts.append(host_dict)
        self.log("hosts: ")
        self.log(hosts, pretty_print=True)
        return hosts

    def _parse_config_file(self):
        """Load and YAML-parse the config file named by --config-file or
        DOCKER_CONFIG_FILE. Returns an empty dict when no file applies."""
        config = dict()
        config_path = None
        if self._args.config_file:
            config_path = self._args.config_file
        elif self._env_args.config_file:
            config_path = self._env_args.config_file
        if config_path:
            try:
                config_file = os.path.abspath(config_path)
            # NOTE(review): bare except masks all errors here; os.path.abspath
            # rarely raises — a narrower clause would be safer.
            except:
                config_file = None
            if config_file and os.path.exists(config_file):
                with open(config_file) as f:
                    try:
                        config = yaml.safe_load(f.read())
                    except Exception as exc:
                        self.fail("Error: parsing %s - %s" % (config_path, str(exc)))
        return config

    def log(self, msg, pretty_print=False):
        # Only emit debug output when --debug was requested.
        if self._args.debug:
            log(msg, pretty_print)

    def fail(self, msg):
        # Delegates to the module-level fail() (stderr + sys.exit(1)).
        fail(msg)

    def _parse_env_args(self):
        """Read DOCKER_ENV_ARGS environment variables into an EnvArgs,
        converting recognized boolean spellings to True/False."""
        args = EnvArgs()
        for key, value in DOCKER_ENV_ARGS.items():
            if os.environ.get(value):
                val = os.environ.get(value)
                if val in BOOLEANS_TRUE:
                    val = True
                if val in BOOLEANS_FALSE:
                    val = False
                setattr(args, key, val)
        return args

    def _parse_cli_args(self):
        # Parse command line arguments
        basename = os.path.splitext(os.path.basename(__file__))[0]
        # Default config file lives next to this script: <script name>.yml
        default_config = basename + '.yml'
        parser = argparse.ArgumentParser(
            description='Return Ansible inventory for one or more Docker hosts.')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List all containers (default: True)')
        parser.add_argument('--debug', action='store_true', default=False,
                            help='Send debug messages to STDOUT')
        parser.add_argument('--host', action='store',
                            help='Only get information for a specific container.')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty print JSON output(default: False)')
        parser.add_argument('--config-file', action='store', default=default_config,
                            help="Name of the config file to use. Default is %s" % (default_config))
        parser.add_argument('--docker-host', action='store', default=None,
                            help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
                                 % (DEFAULT_DOCKER_HOST))
        parser.add_argument('--tls-hostname', action='store', default='localhost',
                            help="Host name to expect in TLS certs. Defaults to 'localhost'")
        parser.add_argument('--api-version', action='store', default=None,
                            help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
        parser.add_argument('--timeout', action='store', default=None,
                            help="Docker connection timeout in seconds. Defaults to %s"
                                 % (DEFAULT_TIMEOUT_SECONDS))
        parser.add_argument('--cacert-path', action='store', default=None,
                            help="Path to the TLS certificate authority pem file.")
        parser.add_argument('--cert-path', action='store', default=None,
                            help="Path to the TLS certificate pem file.")
        parser.add_argument('--key-path', action='store', default=None,
                            help="Path to the TLS encryption key pem file.")
        parser.add_argument('--ssl-version', action='store', default=None,
                            help="TLS version number")
        parser.add_argument('--tls', action='store_true', default=None,
                            help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
        parser.add_argument('--tls-verify', action='store_true', default=None,
                            help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
        parser.add_argument('--private-ssh-port', action='store', default=None,
                            help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
        parser.add_argument('--default-ip-address', action='store', default=None,
                            help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
        return parser.parse_args()

    def _json_format_dict(self, data, pretty_print=False):
        # format inventory data for output
        if pretty_print:
            return json.dumps(data, sort_keys=True, indent=4)
        else:
            return json.dumps(data)
def main():
    """Script entry point: verify docker-py is importable, then build and
    run the inventory."""
    if not HAS_DOCKER_PY:
        fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
    DockerInventory().run()


if __name__ == '__main__':
    # Guard the invocation so importing this module (for testing or reuse)
    # does not immediately execute the inventory script.
    main()
| gpl-3.0 |
OMNIL-Infinity/volatility | volatility/plugins/overlays/windows/pe_vtypes.py | 44 | 17470 | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.exceptions as exceptions
import volatility.obj as obj
# Hand-written vtype definitions for the 32-bit PE structures this module
# parses. Format: {struct_name: [size_in_bytes,
#                                {member: [offset, [type, args...]]}]}
pe_vtypes = {
    '_IMAGE_EXPORT_DIRECTORY': [ 0x28, {
        'Base': [ 0x10, ['unsigned int']],
        'NumberOfFunctions': [ 0x14, ['unsigned int']],
        'NumberOfNames': [ 0x18, ['unsigned int']],
        'AddressOfFunctions': [ 0x1C, ['unsigned int']],
        'AddressOfNames': [ 0x20, ['unsigned int']],
        'AddressOfNameOrdinals': [ 0x24, ['unsigned int']],
    }],
    '_IMAGE_IMPORT_DESCRIPTOR': [ 0x14, {
        # 0 for terminating null import descriptor
        'OriginalFirstThunk': [ 0x0, ['unsigned int']],
        'TimeDateStamp': [ 0x4, ['unsigned int']],
        'ForwarderChain': [ 0x8, ['unsigned int']],
        'Name': [ 0xC, ['unsigned int']],
        # If bound this has actual addresses
        'FirstThunk': [ 0x10, ['unsigned int']],
    }],
    # All members share offset 0: _IMAGE_THUNK_DATA is a union.
    '_IMAGE_THUNK_DATA' : [ 0x4, {
        # Fake member for testing if the highest bit is set
        'OrdinalBit' : [ 0x0, ['BitField', dict(start_bit = 31, end_bit = 32)]],
        'Function' : [ 0x0, ['pointer', ['void']]],
        'Ordinal' : [ 0x0, ['unsigned long']],
        'AddressOfData' : [ 0x0, ['unsigned int']],
        'ForwarderString' : [ 0x0, ['unsigned int']],
    }],
    # Variable-size struct (size None): Hint followed by a NUL-terminated
    # name, read with a 128-character cap.
    '_IMAGE_IMPORT_BY_NAME' : [ None, {
        'Hint' : [ 0x0, ['unsigned short']],
        'Name' : [ 0x2, ['String', dict(length = 128)]],
    }],
}
# 64-bit overrides applied on top of pe_vtypes for 64-bit profiles: the
# thunk union widens to 8 bytes and the ordinal flag moves to bit 63.
pe_vtypes_64 = {
    '_IMAGE_THUNK_DATA' : [ 0x8, {
        # Fake member for testing if the highest bit is set
        'OrdinalBit' : [ 0x0, ['BitField', dict(start_bit = 63, end_bit = 64)]],
        'Function' : [ 0x0, ['pointer64', ['void']]],
        'Ordinal' : [ 0x0, ['unsigned long long']],
        'AddressOfData' : [ 0x0, ['unsigned long long']],
        'ForwarderString' : [ 0x0, ['unsigned long long']],
    }],
}
class _IMAGE_EXPORT_DIRECTORY(obj.CType):
    """Class for PE export directory"""

    def valid(self, nt_header):
        """
        Check the sanity of export table fields.

        The RVAs cannot be larger than the module size. The function
        and name counts cannot be larger than 32K.
        """
        try:
            return (self.AddressOfFunctions < nt_header.OptionalHeader.SizeOfImage and
                    self.AddressOfNameOrdinals < nt_header.OptionalHeader.SizeOfImage and
                    self.AddressOfNames < nt_header.OptionalHeader.SizeOfImage and
                    self.NumberOfFunctions < 0x7FFF and
                    self.NumberOfNames < 0x7FFF)
        except obj.InvalidOffsetError:
            # Header fields themselves are paged/unreadable.
            return False

    def _name(self, name_rva):
        """
        Return a String object for the function name.

        Names are truncated at 128 characters although its possible
        they may be longer. Thus, infrequently a function name will
        be missing some data. However, that's better than hard-coding
        a larger value which frequently causes us to cross page
        boundaries and return a NoneObject anyway.
        """
        return obj.Object("String",
                          offset = self.obj_parent.DllBase + name_rva,
                          vm = self.obj_native_vm, length = 128)

    def _exported_functions(self):
        """
        Generator for exported functions.

        @return: tuple (Ordinal, FunctionRVA, Name)

        Ordinal is an integer and should never be None. If the function
        is forwarded, FunctionRVA is None. Otherwise, FunctionRVA is an
        RVA to the function's code (relative to module base). Name is a
        String containing the exported function's name. If the Name is
        paged, it will be None. If the function is forwarded, Name is the
        forwarded function name including the DLL (ntdll.EtwLogTraceEvent).
        """
        mod_base = self.obj_parent.DllBase
        exp_dir = self.obj_parent.export_dir()
        # PE files with a large number of functions will have arrays
        # that spans multiple pages. Thus the first entries may be valid,
        # last entries may be valid, but middle entries may be invalid
        # (paged). In the various checks below, we test for None (paged)
        # and zero (non-paged but invalid RVA).
        # Array of RVAs to function code
        address_of_functions = obj.Object('Array',
                                          offset = mod_base + self.AddressOfFunctions,
                                          targetType = 'unsigned int',
                                          count = self.NumberOfFunctions,
                                          vm = self.obj_native_vm)
        # Array of RVAs to function names
        address_of_names = obj.Object('Array',
                                      offset = mod_base + self.AddressOfNames,
                                      targetType = 'unsigned int',
                                      count = self.NumberOfNames,
                                      vm = self.obj_native_vm)
        # Array of RVAs to function ordinals
        address_of_name_ordinals = obj.Object('Array',
                                              offset = mod_base + self.AddressOfNameOrdinals,
                                              targetType = 'unsigned short',
                                              count = self.NumberOfNames,
                                              vm = self.obj_native_vm)
        # When functions are exported by Name, it will increase
        # NumberOfNames by 1 and NumberOfFunctions by 1. When
        # functions are exported by Ordinal, only the NumberOfFunctions
        # will increase. First we enum functions exported by Name
        # and track their corresponding Ordinals, so that when we enum
        # functions exported by Ordinal only, we don't duplicate.
        seen_ordinals = []
        # Handle functions exported by name *and* ordinal
        for i in range(self.NumberOfNames):
            name_rva = address_of_names[i]
            ordinal = address_of_name_ordinals[i]
            if name_rva in (0, None):
                continue
            # Check the sanity of ordinal values before using it as an index
            if ordinal == None or ordinal >= self.NumberOfFunctions:
                continue
            func_rva = address_of_functions[ordinal]
            if func_rva in (0, None):
                continue
            # Handle forwarded exports. If the function's RVA is inside the exports
            # section (as given by the VirtualAddress and Size fields in the
            # DataDirectory), the symbol is forwarded. Return the name of the
            # forwarded function and None as the function address.
            if (func_rva >= exp_dir.VirtualAddress and
                    func_rva < exp_dir.VirtualAddress + exp_dir.Size):
                n = self._name(func_rva)
                f = obj.NoneObject("Ordinal function {0} in module {1} forwards to {2}".format(
                    ordinal, self.obj_parent.BaseDllName, n))
            else:
                n = self._name(name_rva)
                f = func_rva
            # Add the ordinal base and save it
            ordinal += self.Base
            seen_ordinals.append(ordinal)
            yield ordinal, f, n
        # Handle functions exported by ordinal only
        for i in range(self.NumberOfFunctions):
            ordinal = self.Base + i
            # Skip functions already enumberated above
            if ordinal not in seen_ordinals:
                func_rva = address_of_functions[i]
                if func_rva in (0, None):
                    continue
                seen_ordinals.append(ordinal)
                # There is no name RVA
                yield ordinal, func_rva, obj.NoneObject("Name RVA not accessible")
class _IMAGE_IMPORT_DESCRIPTOR(obj.CType):
    """Handles IID entries for imported functions"""

    def valid(self, nt_header):
        """Check the validity of some fields"""
        try:
            # Thunk RVAs must be non-zero and inside the mapped image.
            return (self.OriginalFirstThunk != 0 and
                    self.OriginalFirstThunk < nt_header.OptionalHeader.SizeOfImage and
                    self.FirstThunk != 0 and
                    self.FirstThunk < nt_header.OptionalHeader.SizeOfImage and
                    self.Name < nt_header.OptionalHeader.SizeOfImage)
        except obj.InvalidOffsetError:
            return False

    def _name(self, name_rva):
        """Return a String object for the name at the given RVA"""
        return obj.Object("String",
                          offset = self.obj_parent.DllBase + name_rva,
                          vm = self.obj_native_vm, length = 128)

    def dll_name(self):
        """Returns the name of the DLL for this IID"""
        return self._name(self.Name)

    def _imported_functions(self):
        """
        Generator for imported functions.

        @return: tuple (Ordinal, FunctionVA, Name)

        If the function is imported by ordinal, then Ordinal is the
        ordinal value and Name is None.

        If the function is imported by name, then Ordinal is the
        hint and Name is the imported function name (or None if its
        paged).

        FunctionVA is the virtual address of the imported function,
        as applied to the IAT by the Windows loader. If the FirstThunk
        is paged, then FunctionVA will be None.
        """
        i = 0
        while 1:
            thunk = obj.Object('_IMAGE_THUNK_DATA',
                               offset = self.obj_parent.DllBase + self.OriginalFirstThunk +
                               i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),
                               vm = self.obj_native_vm)
            # We've reached the end when the element is zero
            if thunk == None or thunk.AddressOfData == 0:
                break
            o = obj.NoneObject("Ordinal not accessible?")
            n = obj.NoneObject("Imported by ordinal?")
            f = obj.NoneObject("FirstThunk not accessible")
            # If the highest bit (32 for x86 and 64 for x64) is set, the function is
            # imported by ordinal and the lowest 16-bits contain the ordinal value.
            # Otherwise, the lowest bits (0-31 for x86 and 0-63 for x64) contain an
            # RVA to an _IMAGE_IMPORT_BY_NAME struct.
            if thunk.OrdinalBit == 1:
                o = thunk.Ordinal & 0xFFFF
            else:
                iibn = obj.Object("_IMAGE_IMPORT_BY_NAME",
                                  offset = self.obj_parent.DllBase +
                                  thunk.AddressOfData,
                                  vm = self.obj_native_vm)
                o = iibn.Hint
                n = iibn.Name
            # See if the import is bound (i.e. resolved)
            first_thunk = obj.Object('_IMAGE_THUNK_DATA',
                                     offset = self.obj_parent.DllBase + self.FirstThunk +
                                     i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),
                                     vm = self.obj_native_vm)
            if first_thunk:
                f = first_thunk.Function.v()
            yield o, f, n
            i += 1

    def is_list_end(self):
        """Returns True if we've reached the list end"""
        # The IID array is terminated by an all-zero descriptor; zread
        # returns zero-filled bytes for unreadable memory, so a fully
        # paged entry also terminates the walk.
        data = self.obj_vm.zread(
            self.obj_offset,
            self.obj_vm.profile.get_obj_size('_IMAGE_IMPORT_DESCRIPTOR')
        )
        return data.count(chr(0)) == len(data)
class _LDR_DATA_TABLE_ENTRY(obj.CType):
    """
    Class for PE file / modules

    If these classes are instantiated by _EPROCESS.list_*_modules()
    then its guaranteed to be in the process address space.

    FIXME: If these classes are found by modscan, ensure we can
    dereference properly with obj_native_vm.
    """

    def _nt_header(self):
        """Return the _IMAGE_NT_HEADERS object"""
        try:
            dos_header = obj.Object("_IMAGE_DOS_HEADER", offset = self.DllBase,
                                    vm = self.obj_native_vm)
            return dos_header.get_nt_header()
        except ValueError:
            return obj.NoneObject("Failed initial sanity checks")
        except exceptions.SanityCheckException:
            return obj.NoneObject("Failed initial sanity checks. Try -u or --unsafe")

    def _directory(self, dir_index):
        """Return the requested IMAGE_DATA_DIRECTORY.

        Raises ValueError when the directory is missing, empty, or has an
        insane RVA/size.
        """
        nt_header = self._nt_header()
        # Deliberately `== None` rather than `is None`: _nt_header() returns
        # a volatility NoneObject on failure, which compares equal to None
        # but is a distinct object.
        if nt_header == None:
            raise ValueError('No directory index {0}'.format(dir_index))
        data_dir = nt_header.OptionalHeader.DataDirectory[dir_index]
        if data_dir == None:
            raise ValueError('No directory index {0}'.format(dir_index))
        # Make sure the directory exists
        if data_dir.VirtualAddress == 0 or data_dir.Size == 0:
            raise ValueError('No directory index {0}'.format(dir_index))
        # Make sure the directory VA and Size are sane
        if data_dir.VirtualAddress + data_dir.Size > nt_header.OptionalHeader.SizeOfImage:
            raise ValueError('Invalid directory for index {0}'.format(dir_index))
        return data_dir

    def export_dir(self):
        """Return the IMAGE_DATA_DIRECTORY for exports"""
        return self._directory(0) # DIRECTORY_ENTRY_EXPORT

    def import_dir(self):
        """Return the IMAGE_DATA_DIRECTORY for imports"""
        return self._directory(1) # DIRECTORY_ENTRY_IMPORT

    def debug_dir(self):
        """Return the IMAGE_DEBUG_DIRECTORY for debug info"""
        return self._directory(6) # IMAGE_DEBUG_DIRECTORY

    def get_debug_directory(self):
        """Return the debug directory object for this PE"""
        try:
            data_dir = self.debug_dir()
        # `except X as e` (valid on Python 2.6+) instead of the
        # Python-2-only `except X, e` syntax.
        except ValueError as why:
            return obj.NoneObject(str(why))
        return obj.Object("_IMAGE_DEBUG_DIRECTORY",
                          offset = self.DllBase + data_dir.VirtualAddress,
                          vm = self.obj_native_vm)

    def getprocaddress(self, func):
        """Return the RVA of func"""
        for _, f, n in self.exports():
            if str(n or '') == func:
                return f
        return None

    def imports(self):
        """
        Generator for the PE's imported functions.

        The _DIRECTORY_ENTRY_IMPORT.VirtualAddress points to an array
        of _IMAGE_IMPORT_DESCRIPTOR structures. The end is reached when
        the IID structure is all zeros.
        """
        try:
            data_dir = self.import_dir()
        except ValueError:
            # No (valid) import directory: yield nothing. A plain `return`
            # ends the generator exactly like the former
            # `raise StopIteration(...)` did, and stays correct under
            # PEP 479 (Python 3.7+), where raising StopIteration inside a
            # generator becomes a RuntimeError.
            return
        i = 0
        desc_size = self.obj_vm.profile.get_obj_size('_IMAGE_IMPORT_DESCRIPTOR')
        while 1:
            desc = obj.Object('_IMAGE_IMPORT_DESCRIPTOR',
                              vm = self.obj_native_vm,
                              offset = self.DllBase + data_dir.VirtualAddress + (i * desc_size),
                              parent = self)
            # Stop if the IID is paged or all zeros
            if desc == None or desc.is_list_end():
                break
            # Stop if the IID contains invalid fields
            if not desc.valid(self._nt_header()):
                break
            dll_name = desc.dll_name()
            for o, f, n in desc._imported_functions():
                yield dll_name, o, f, n
            i += 1

    def exports(self):
        """Generator for the PE's exported functions"""
        try:
            data_dir = self.export_dir()
        except ValueError:
            # No (valid) export directory: yield nothing (see imports()).
            return
        expdir = obj.Object('_IMAGE_EXPORT_DIRECTORY',
                            offset = self.DllBase + data_dir.VirtualAddress,
                            vm = self.obj_native_vm,
                            parent = self)
        if expdir.valid(self._nt_header()):
            # Ordinal, Function RVA, and Name Object
            for o, f, n in expdir._exported_functions():
                yield o, f, n
class WinPEVTypes(obj.ProfileModification):
    """Install the base (32-bit) PE vtype definitions into Windows profiles."""
    before = ['WindowsOverlay']
    conditions = {'os': lambda x : x == 'windows'}

    def modification(self, profile):
        profile.vtypes.update(pe_vtypes)
class WinPEx64VTypes(obj.ProfileModification):
    """Overlay the 64-bit PE vtype overrides; runs after WinPEVTypes
    (declared via `before`) so the 8-byte thunk wins on 64-bit profiles."""
    before = ['WinPEVTypes']
    conditions = {'os': lambda x : x == 'windows',
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.vtypes.update(pe_vtypes_64)
class WinPEObjectClasses(obj.ProfileModification):
    """Register the Python wrapper classes for the PE structures so that
    instantiating those vtypes yields the classes defined above."""
    before = ['WindowsOverlay']
    conditions = {'os': lambda x : x == 'windows'}

    def modification(self, profile):
        profile.object_classes.update({
            '_IMAGE_EXPORT_DIRECTORY': _IMAGE_EXPORT_DIRECTORY,
            '_IMAGE_IMPORT_DESCRIPTOR': _IMAGE_IMPORT_DESCRIPTOR,
            '_LDR_DATA_TABLE_ENTRY': _LDR_DATA_TABLE_ENTRY,
        })
| gpl-2.0 |
tedder/ansible | lib/ansible/plugins/filter/k8s.py | 59 | 1064 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
try:
from openshift.helper.hashes import generate_hash
HAS_GENERATE_HASH = True
except ImportError:
HAS_GENERATE_HASH = False
from ansible.errors import AnsibleFilterError
def k8s_config_resource_name(resource):
    """Return a content-addressed name for a ConfigMap/Secret definition.

    The result is the resource's metadata.name with a hash of the resource
    contents appended (via openshift's generate_hash).

    :param resource: mapping describing a Kubernetes resource; must contain
        a metadata.name entry.
    :raises AnsibleFilterError: when the openshift dependency is missing or
        the resource has no usable metadata.name.
    """
    if not HAS_GENERATE_HASH:
        raise AnsibleFilterError("k8s_config_resource_name requires openshift>=0.7.2")
    try:
        return resource['metadata']['name'] + '-' + generate_hash(resource)
    except (KeyError, TypeError):
        # KeyError: 'metadata' or 'name' key missing.
        # TypeError: resource (or its 'metadata' value) is not a mapping,
        # which previously escaped as an unhelpful traceback.
        raise AnsibleFilterError("resource must have a metadata.name key to generate a resource name")
class FilterModule(object):
    """Expose this plugin's Jinja2 filters to Ansible."""

    def filters(self):
        """Return the mapping of filter names to their callables."""
        filter_map = {
            'k8s_config_resource_name': k8s_config_resource_name,
        }
        return filter_map
| gpl-3.0 |
rgeleta/odoo | addons/hr_payroll/wizard/hr_payroll_payslips_by_employees.py | 337 | 3443 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil import relativedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_payslip_employees(osv.osv_memory):
    # Transient wizard: from a payslip run (hr.payslip.run), generate one
    # payslip per selected employee and compute it.

    _name ='hr.payslip.employees'
    _description = 'Generate payslips for all selected employees'
    _columns = {
        # Employees to generate payslips for (many2many selection widget).
        'employee_ids': fields.many2many('hr.employee', 'hr_employee_group_rel', 'payslip_id', 'employee_id', 'Employees'),
    }

    def compute_sheet(self, cr, uid, ids, context=None):
        """Create and compute a payslip for every selected employee.

        The period, and whether the slips are credit notes, come from the
        active hr.payslip.run record (context['active_id']). Raises a user
        error when no employee is selected. Returns the standard
        close-window action.
        """
        emp_pool = self.pool.get('hr.employee')
        slip_pool = self.pool.get('hr.payslip')
        run_pool = self.pool.get('hr.payslip.run')
        slip_ids = []
        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        run_data = {}
        if context and context.get('active_id', False):
            run_data = run_pool.read(cr, uid, [context['active_id']], ['date_start', 'date_end', 'credit_note'])[0]
        from_date = run_data.get('date_start', False)
        to_date = run_data.get('date_end', False)
        credit_note = run_data.get('credit_note', False)
        if not data['employee_ids']:
            raise osv.except_osv(_("Warning!"), _("You must select employee(s) to generate payslip(s)."))
        for emp in emp_pool.browse(cr, uid, data['employee_ids'], context=context):
            # Reuse the payslip form's onchange to pre-fill structure,
            # contract, worked days and inputs for this employee/period.
            slip_data = slip_pool.onchange_employee_id(cr, uid, [], from_date, to_date, emp.id, contract_id=False, context=context)
            res = {
                'employee_id': emp.id,
                'name': slip_data['value'].get('name', False),
                'struct_id': slip_data['value'].get('struct_id', False),
                'contract_id': slip_data['value'].get('contract_id', False),
                'payslip_run_id': context.get('active_id', False),
                'input_line_ids': [(0, 0, x) for x in slip_data['value'].get('input_line_ids', False)],
                'worked_days_line_ids': [(0, 0, x) for x in slip_data['value'].get('worked_days_line_ids', False)],
                'date_from': from_date,
                'date_to': to_date,
                'credit_note': credit_note,
            }
            slip_ids.append(slip_pool.create(cr, uid, res, context=context))
        slip_pool.compute_sheet(cr, uid, slip_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ptemplier/ansible | lib/ansible/modules/system/firewalld.py | 8 | 37091 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- "Name of a service to add/remove to/from firewalld - service must be listed in output of firewall-cmd --get-services."
required: false
default: null
port:
description:
- "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
required: false
default: null
rich_rule:
description:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
source:
description:
- 'The source/network you would like to add/remove to/from firewalld'
required: false
default: null
version_added: "2.0"
interface:
description:
- 'The interface you would like to add/remove to/from a zone in firewalld'
required: false
default: null
version_added: "2.1"
zone:
description:
- >
The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices
can be extended based on per-system configs, listed here are "out of the box" defaults).
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- >
Should this configuration be in the running firewalld configuration or persist across reboots. As of Ansible version 2.3, permanent operations can
operate on firewalld configs when it's not running (requires firewalld >= 3.0.9)
required: false
default: null
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
required: false
default: false
version_added: "1.9"
state:
description:
- "Should this port accept(enabled) or reject(disabled) connections."
required: true
choices: [ "enabled", "disabled" ]
timeout:
description:
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
masquerade:
description:
- 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
required: false
default: null
version_added: "2.1"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, which may not be installed by default if the distribution switched to python 3
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
- firewalld:
service: https
permanent: true
state: enabled
- firewalld:
port: 8081/tcp
permanent: true
state: disabled
- firewalld:
port: 161-162/udp
permanent: true
state: enabled
- firewalld:
zone: dmz
service: http
permanent: true
state: enabled
- firewalld:
rich_rule: 'rule service name="ftp" audit limit value="1/m" accept'
permanent: true
state: enabled
- firewalld:
source: 192.0.2.0/24
zone: internal
state: enabled
- firewalld:
zone: trusted
interface: eth2
permanent: true
state: enabled
- firewalld:
masquerade: yes
state: enabled
permanent: true
zone: dmz
'''
from ansible.module_utils.basic import AnsibleModule
import sys
#####################
# Globals
#
# Module-level handles shared by the helper functions below.  They are all
# populated in main() once the firewalld python bindings have been imported.
fw = None                          # FirewallClient (online) or Firewall_test (offline) instance
module = None                      # AnsibleModule instance, set in main()
fw_offline = False                 # True when editing on-disk config without a running daemon
Rich_Rule = None                   # firewall.client.Rich_Rule class, imported lazily in main()
FirewallClientZoneSettings = None  # imported lazily in main(), needed for offline operation
#####################
# exception handling
#
def action_handler(action_func, action_func_args):
    """
    Invoke a firewalld action callable inside try/except so any backend
    exception is converted into a module failure with a (hopefully) useful
    error message instead of a raw traceback.
    """
    try:
        return action_func(*action_func_args)
    except Exception:
        # sys.exc_info() keeps this importable on very old Python (2.4 CI).
        err = sys.exc_info()[1]

        # Add extra context for commonly known errors to help users diagnose
        # what's wrong.
        hints = []
        if "INVALID_SERVICE" in "%s" % err:
            hints.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)")

        if hints:
            module.fail_json(
                msg='ERROR: Exception caught: %s %s' % (err, ', '.join(hints))
            )
        module.fail_json(msg='ERROR: Exception caught: %s' % err)
#####################
# fw_offline helpers
#
def get_fw_zone_settings(zone):
    """Return the (zone_object, zone_settings) pair for *zone*, transparently
    handling both the online (D-Bus) and offline (on-disk) APIs."""
    if fw_offline:
        zone_obj = fw.config.get_zone(zone)
        settings = FirewallClientZoneSettings(
            list(fw.config.get_zone_config(zone_obj))
        )
        return (zone_obj, settings)
    zone_obj = fw.config().getZoneByName(zone)
    return (zone_obj, zone_obj.getSettings())


def update_fw_settings(fw_zone, fw_settings):
    """Persist modified *fw_settings* back onto *fw_zone* via whichever API
    (online/offline) is in use."""
    if fw_offline:
        fw.config.set_zone_config(fw_zone, fw_settings.settings)
    else:
        fw_zone.update(fw_settings)
#####################
# masquerade handling
#
def get_masquerade_enabled(zone):
    """Return True if masquerading is active on *zone* in the runtime config."""
    # Idiom fix: return the boolean directly instead of if/else True/False.
    return fw.queryMasquerade(zone) is True


def get_masquerade_enabled_permanent(zone):
    """Return True if masquerading is enabled in *zone*'s permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    return fw_settings.getMasquerade() is True


def set_masquerade_enabled(zone):
    """Enable masquerading on *zone* (runtime only)."""
    fw.addMasquerade(zone)


def set_masquerade_disabled(zone):
    """Disable masquerading on *zone* (runtime only)."""
    fw.removeMasquerade(zone)


def set_masquerade_permanent(zone, masquerade):
    """Set the permanent masquerade flag on *zone* to *masquerade* (bool)."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.setMasquerade(masquerade)
    update_fw_settings(fw_zone, fw_settings)
################
# port handling
#
def get_port_enabled(zone, port_proto):
    """Return True if the [port, protocol] pair is enabled in the runtime
    (or offline on-disk) configuration of *zone*."""
    if fw_offline:
        fw_zone, fw_settings = get_fw_zone_settings(zone)
        ports_list = fw_settings.getPorts()
    else:
        ports_list = fw.getPorts(zone)
    # Idiom fix: 'in' already yields a bool; no if/else True/False needed.
    return port_proto in ports_list


def set_port_enabled(zone, port, protocol, timeout):
    """Open *port*/*protocol* on *zone* at runtime, optionally for *timeout* seconds."""
    fw.addPort(zone, port, protocol, timeout)


def set_port_disabled(zone, port, protocol):
    """Close *port*/*protocol* on *zone* at runtime."""
    fw.removePort(zone, port, protocol)


def get_port_enabled_permanent(zone, port_proto):
    """Return True if the [port, protocol] pair is in *zone*'s permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # The permanent API returns tuples, hence the conversion before the test.
    return tuple(port_proto) in fw_settings.getPorts()


def set_port_enabled_permanent(zone, port, protocol):
    """Open *port*/*protocol* in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addPort(port, protocol)
    update_fw_settings(fw_zone, fw_settings)


def set_port_disabled_permanent(zone, port, protocol):
    """Close *port*/*protocol* in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removePort(port, protocol)
    update_fw_settings(fw_zone, fw_settings)
####################
# source handling
#
def get_source(zone, source):
    """Return True if *source* (address/network) is bound to *zone*."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Idiom fix: return the membership test directly.
    return source in fw_settings.getSources()


def add_source(zone, source):
    """Bind *source* to *zone* and persist the change."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addSource(source)
    update_fw_settings(fw_zone, fw_settings)


def remove_source(zone, source):
    """Unbind *source* from *zone* and persist the change."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeSource(source)
    update_fw_settings(fw_zone, fw_settings)
####################
# interface handling
#
def get_interface(zone, interface):
    """Return True if *interface* is bound to *zone* in the runtime
    (or offline on-disk) configuration."""
    if fw_offline:
        fw_zone, fw_settings = get_fw_zone_settings(zone)
        interface_list = fw_settings.getInterfaces()
    else:
        interface_list = fw.getInterfaces(zone)
    # BUGFIX: previously tested against fw.getInterfaces(zone) unconditionally,
    # which ignored the offline list computed above and broke offline mode.
    return interface in interface_list


def change_zone_of_interface(zone, interface):
    """Move *interface* into *zone* at runtime."""
    fw.changeZoneOfInterface(zone, interface)


def remove_interface(zone, interface):
    """Detach *interface* from *zone* at runtime."""
    fw.removeInterface(zone, interface)


def get_interface_permanent(zone, interface):
    """Return True if *interface* is in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    return interface in fw_settings.getInterfaces()


def change_zone_of_interface_permanent(zone, interface):
    """Move *interface* into *zone* in the permanent configuration, removing
    it from whatever zone currently owns it."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    if fw_offline:
        iface_zone_objs = []
        # BUGFIX: the loop variable used to shadow the 'zone' parameter, which
        # corrupted the ownership comparison below.
        for zone_name in fw.config.get_zones():
            old_zone_obj = fw.config.get_zone(zone_name)
            if interface in old_zone_obj.interfaces:
                iface_zone_objs.append(old_zone_obj)
        if len(iface_zone_objs) > 1:
            # Even if it shouldn't happen, it's actually possible that
            # the same interface is in several zone XML files
            module.fail_json(
                msg='ERROR: interface {0} is in {1} zone XML file, can only be in one'.format(
                    interface,
                    len(iface_zone_objs)
                )
            )
        if iface_zone_objs:
            old_zone_obj = iface_zone_objs[0]
            if old_zone_obj.name == zone:
                # Already attached to the requested zone: nothing to do.
                return
            old_zone_settings = FirewallClientZoneSettings(
                fw.config.get_zone_config(old_zone_obj)
            )
            old_zone_settings.removeInterface(interface)    # remove from old
            fw.config.set_zone_config(old_zone_obj, old_zone_settings.settings)
        # BUGFIX: guard above replaces an unconditional iface_zone_objs[0]
        # that raised IndexError when the interface was in no zone yet; the
        # interface is now always added to the target zone.
        fw_settings.addInterface(interface)                 # add to new
        fw.config.set_zone_config(fw_zone, fw_settings.settings)
    else:
        old_zone_name = fw.config().getZoneOfInterface(interface)
        if old_zone_name != zone:
            if old_zone_name:
                old_zone_obj = fw.config().getZoneByName(old_zone_name)
                old_zone_settings = old_zone_obj.getSettings()
                old_zone_settings.removeInterface(interface)  # remove from old
                old_zone_obj.update(old_zone_settings)
            fw_settings.addInterface(interface)               # add to new
            fw_zone.update(fw_settings)


def remove_interface_permanent(zone, interface):
    """Detach *interface* from *zone* in the permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeInterface(interface)
    update_fw_settings(fw_zone, fw_settings)
####################
# service handling
#
def get_service_enabled(zone, service):
    """Return True if *service* is enabled on *zone* at runtime."""
    # Idiom fix: return the membership test directly.
    return service in fw.getServices(zone)


def set_service_enabled(zone, service, timeout):
    """Enable *service* on *zone* at runtime, optionally for *timeout* seconds."""
    fw.addService(zone, service, timeout)


def set_service_disabled(zone, service):
    """Disable *service* on *zone* at runtime."""
    fw.removeService(zone, service)


def get_service_enabled_permanent(zone, service):
    """Return True if *service* is in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    return service in fw_settings.getServices()


def set_service_enabled_permanent(zone, service):
    """Enable *service* in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addService(service)
    update_fw_settings(fw_zone, fw_settings)


def set_service_disabled_permanent(zone, service):
    """Disable *service* in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeService(service)
    update_fw_settings(fw_zone, fw_settings)
####################
# rich rule handling
#
def get_rich_rule_enabled(zone, rule):
    """Return True if *rule* is active on *zone* at runtime."""
    # Convert the rule string to standard format
    # before checking whether it is present
    rule = str(Rich_Rule(rule_str=rule))
    # Idiom fix: return the membership test directly.
    return rule in fw.getRichRules(zone)


def set_rich_rule_enabled(zone, rule, timeout):
    """Add *rule* to *zone* at runtime, optionally for *timeout* seconds."""
    fw.addRichRule(zone, rule, timeout)


def set_rich_rule_disabled(zone, rule):
    """Remove *rule* from *zone* at runtime."""
    fw.removeRichRule(zone, rule)


def get_rich_rule_enabled_permanent(zone, rule):
    """Return True if *rule* is in *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Convert the rule string to standard format
    # before checking whether it is present
    rule = str(Rich_Rule(rule_str=rule))
    return rule in fw_settings.getRichRules()


def set_rich_rule_enabled_permanent(zone, rule):
    """Add *rule* to *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addRichRule(rule)
    update_fw_settings(fw_zone, fw_settings)


def set_rich_rule_disabled_permanent(zone, rule):
    """Remove *rule* from *zone*'s permanent configuration."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeRichRule(rule)
    update_fw_settings(fw_zone, fw_settings)
def _handle_enable_disable(desired_state, permanent, immediate,
                           query_permanent, query_immediate,
                           enable_permanent, disable_permanent,
                           enable_immediate, disable_immediate,
                           msgs):
    """Generic reconciliation of a single firewalld setting.

    Each query/enable/disable argument is a ``(callable, args_tuple)`` pair
    executed through action_handler().  Honors check mode (exits early via
    module.exit_json(changed=True) when a change would be made) and appends
    an operation-mode message to *msgs*.

    :return: True when a change was applied, else False.
    """
    changed = False
    if immediate and permanent:
        is_enabled_permanent = action_handler(*query_permanent)
        is_enabled_immediate = action_handler(*query_immediate)
        msgs.append('Permanent and Non-Permanent(immediate) operation')
        if desired_state == "enabled":
            if not is_enabled_permanent or not is_enabled_immediate:
                if module.check_mode:
                    module.exit_json(changed=True)
                if not is_enabled_permanent:
                    action_handler(*enable_permanent)
                    changed = True
                if not is_enabled_immediate:
                    action_handler(*enable_immediate)
                    changed = True
        elif desired_state == "disabled":
            if is_enabled_permanent or is_enabled_immediate:
                if module.check_mode:
                    module.exit_json(changed=True)
                if is_enabled_permanent:
                    action_handler(*disable_permanent)
                    changed = True
                if is_enabled_immediate:
                    action_handler(*disable_immediate)
                    changed = True
    elif permanent and not immediate:
        is_enabled = action_handler(*query_permanent)
        msgs.append('Permanent operation')
        if desired_state == "enabled" and not is_enabled:
            if module.check_mode:
                module.exit_json(changed=True)
            action_handler(*enable_permanent)
            changed = True
        elif desired_state == "disabled" and is_enabled:
            if module.check_mode:
                module.exit_json(changed=True)
            action_handler(*disable_permanent)
            changed = True
    elif immediate and not permanent:
        is_enabled = action_handler(*query_immediate)
        msgs.append('Non-permanent operation')
        if desired_state == "enabled" and not is_enabled:
            if module.check_mode:
                module.exit_json(changed=True)
            action_handler(*enable_immediate)
            changed = True
        elif desired_state == "disabled" and is_enabled:
            if module.check_mode:
                module.exit_json(changed=True)
            action_handler(*disable_immediate)
            changed = True
    return changed


def main():
    # Make the shared handles global so we don't have to pass them to every
    # helper call.  (BUGFIX: the original declared "global module" twice.)
    global module
    global fw
    global fw_offline
    global Rich_Rule
    global FirewallClientZoneSettings

    module = AnsibleModule(
        argument_spec=dict(
            service=dict(required=False, default=None),
            port=dict(required=False, default=None),
            rich_rule=dict(required=False, default=None),
            zone=dict(required=False, default=None),
            immediate=dict(type='bool', default=False),
            source=dict(required=False, default=None),
            permanent=dict(type='bool', required=False, default=None),
            state=dict(choices=['enabled', 'disabled'], required=True),
            timeout=dict(type='int', required=False, default=0),
            interface=dict(required=False, default=None),
            masquerade=dict(required=False, default=None),
            offline=dict(type='bool', required=False, default=None),
        ),
        supports_check_mode=True
    )

    ## Handle running (online) daemon vs non-running (offline) daemon
    try:
        import firewall.config
        FW_VERSION = firewall.config.VERSION

        from firewall.client import Rich_Rule
        from firewall.client import FirewallClient
        fw = None
        fw_offline = False

        try:
            fw = FirewallClient()
            fw.getDefaultZone()
        except AttributeError:
            ## Firewalld is not currently running, permanent-only operations
            ## Import other required parts of the firewalld API
            ##
            ## NOTE:
            ## online and offline operations do not share a common firewalld API
            from firewall.core.fw_test import Firewall_test
            from firewall.client import FirewallClientZoneSettings
            fw = Firewall_test()
            fw.start()
            fw_offline = True
    except ImportError:
        ## Make python 2.4 shippable ci tests happy
        e = sys.exc_info()[1]
        # BUGFIX: version numbers in the message now match the actual
        # comparisons below (0.2.11 / 0.3.9, not 2.0.11 / 3.0.9).
        module.fail_json(msg='firewalld and its python 2 module are required for this module, version 0.2.11 or newer required '
                             '(0.3.9 or newer for offline operations) \n %s' % e)

    if fw_offline:
        ## Pre-run version checking
        if FW_VERSION < "0.3.9":
            module.fail_json(msg='unsupported version of firewalld, offline operations require >= 0.3.9')
    else:
        ## Pre-run version checking
        if FW_VERSION < "0.2.11":
            module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')

        ## Check for firewalld running
        try:
            if fw.connected is False:
                module.fail_json(msg='firewalld service must be running, or try with offline=true')
        except AttributeError:
            module.fail_json(msg="firewalld connection can't be established,\
                    installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)

    ## Verify required params are provided
    if module.params['source'] is None and module.params['permanent'] is None:
        module.fail_json(msg='permanent is a required parameter')

    if module.params['interface'] is not None and module.params['zone'] is None:
        # BUGFIX: was module.fail(), which does not exist on AnsibleModule.
        module.fail_json(msg='zone is a required parameter')

    if module.params['immediate'] and fw_offline:
        # BUGFIX: was module.fail(), which does not exist on AnsibleModule.
        module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')

    ## Global Vars
    changed = False
    msgs = []
    service = module.params['service']
    rich_rule = module.params['rich_rule']
    source = module.params['source']

    if module.params['port'] is not None:
        # BUGFIX: partition() makes the "missing protocol" error reachable;
        # the old split('/') either raised ValueError or never yielded None.
        port, _sep, protocol = module.params['port'].strip().partition('/')
        if not protocol:
            module.fail_json(msg='improper port format (missing protocol?)')
    else:
        port = None
        protocol = None

    if module.params['zone'] is not None:
        zone = module.params['zone']
    elif fw_offline:
        zone = fw.get_default_zone()
    else:
        zone = fw.getDefaultZone()

    permanent = module.params['permanent']
    desired_state = module.params['state']
    immediate = module.params['immediate']
    timeout = module.params['timeout']
    interface = module.params['interface']
    masquerade = module.params['masquerade']

    # Exactly one modification type may be supplied per task invocation.
    modification_count = sum(
        1 for param in (service, port, rich_rule, interface, masquerade)
        if param is not None
    )
    if modification_count > 1:
        module.fail_json(msg='can only operate on port, service, rich_rule or interface at once')

    if service is not None:
        changed = _handle_enable_disable(
            desired_state, permanent, immediate,
            query_permanent=(get_service_enabled_permanent, (zone, service)),
            query_immediate=(get_service_enabled, (zone, service)),
            enable_permanent=(set_service_enabled_permanent, (zone, service)),
            disable_permanent=(set_service_disabled_permanent, (zone, service)),
            enable_immediate=(set_service_enabled, (zone, service, timeout)),
            disable_immediate=(set_service_disabled, (zone, service)),
            msgs=msgs,
        ) or changed
        if changed:
            msgs.append("Changed service %s to %s" % (service, desired_state))

    # FIXME - source type does not handle non-permanent mode, this was an
    # oversight in the past.
    if source is not None:
        is_enabled = action_handler(get_source, (zone, source))
        if desired_state == "enabled":
            if not is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                action_handler(add_source, (zone, source))
                changed = True
                msgs.append("Added %s to zone %s" % (source, zone))
        elif desired_state == "disabled":
            if is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                action_handler(remove_source, (zone, source))
                changed = True
                msgs.append("Removed %s from zone %s" % (source, zone))

    if port is not None:
        changed = _handle_enable_disable(
            desired_state, permanent, immediate,
            query_permanent=(get_port_enabled_permanent, (zone, [port, protocol])),
            query_immediate=(get_port_enabled, (zone, [port, protocol])),
            enable_permanent=(set_port_enabled_permanent, (zone, port, protocol)),
            disable_permanent=(set_port_disabled_permanent, (zone, port, protocol)),
            enable_immediate=(set_port_enabled, (zone, port, protocol, timeout)),
            disable_immediate=(set_port_disabled, (zone, port, protocol)),
            msgs=msgs,
        ) or changed
        if changed:
            msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol),
                                                   desired_state))

    if rich_rule is not None:
        changed = _handle_enable_disable(
            desired_state, permanent, immediate,
            query_permanent=(get_rich_rule_enabled_permanent, (zone, rich_rule)),
            query_immediate=(get_rich_rule_enabled, (zone, rich_rule)),
            enable_permanent=(set_rich_rule_enabled_permanent, (zone, rich_rule)),
            disable_permanent=(set_rich_rule_disabled_permanent, (zone, rich_rule)),
            enable_immediate=(set_rich_rule_enabled, (zone, rich_rule, timeout)),
            disable_immediate=(set_rich_rule_disabled, (zone, rich_rule)),
            msgs=msgs,
        ) or changed
        if changed:
            msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))

    if interface is not None:
        # Consistency fix: permanent interface changes now also run through
        # action_handler() so backend errors become fail_json, not tracebacks.
        changed = _handle_enable_disable(
            desired_state, permanent, immediate,
            query_permanent=(get_interface_permanent, (zone, interface)),
            query_immediate=(get_interface, (zone, interface)),
            enable_permanent=(change_zone_of_interface_permanent, (zone, interface)),
            disable_permanent=(remove_interface_permanent, (zone, interface)),
            enable_immediate=(change_zone_of_interface, (zone, interface)),
            disable_immediate=(remove_interface, (zone, interface)),
            msgs=msgs,
        ) or changed
        if changed:
            if desired_state == "enabled":
                msgs.append("Changed %s to zone %s" % (interface, zone))
            else:
                msgs.append("Removed %s from zone %s" % (interface, zone))

    if masquerade is not None:
        # BUGFIX: the immediate-only path used to pass (zone) — a bare string,
        # unpacked character by character — instead of the tuple (zone,).
        changed = _handle_enable_disable(
            desired_state, permanent, immediate,
            query_permanent=(get_masquerade_enabled_permanent, (zone,)),
            query_immediate=(get_masquerade_enabled, (zone,)),
            enable_permanent=(set_masquerade_permanent, (zone, True)),
            disable_permanent=(set_masquerade_permanent, (zone, False)),
            enable_immediate=(set_masquerade_enabled, (zone,)),
            disable_immediate=(set_masquerade_disabled, (zone,)),
            msgs=msgs,
        ) or changed
        if changed:
            if desired_state == "enabled":
                msgs.append("Added masquerade to zone %s" % (zone))
            else:
                msgs.append("Removed masquerade from zone %s" % (zone))

    if fw_offline:
        msgs.append("(offline operation: only on-disk configs were altered)")

    module.exit_json(changed=changed, msg=', '.join(msgs))


if __name__ == '__main__':
    main()
| gpl-3.0 |
Boggart/ByondTools | scripts/checkForStringIdiocy.py | 2 | 6922 | #!/usr/bin/env python
import os, sys, re
"""
Usage:
$ python fix_string_idiocy.py path/to/your.dme .dm
NOTE: NOT PERFECT, CREATES code-fixed DIRECTORY.
*** MERGE THIS MANUALLY OR YOU WILL BREAK SHIT. ***
fix_string_idiocy.py - Combines multiple string append operations in DreamMaker code
Copyright 2013 Rob "N3X15" Nelson <nexis@7chan.org>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Matches a (possibly declared) DM string assignment or append such as
#   var/foo = "..."   or   foo += "..."
# capturing the tab depth, the optional "var/" declaration, the identifier,
# the operator ("+" when appending, empty when assigning) and the quoted
# string content.
REGEX_TO_COMBINE_AS_BLOCK = re.compile('^(?P<tabs>\t+)(?P<declaration>var/)?(?P<identifier>[A-Za-z\.]+)\s*(?P<operator>\+?)=\s*"(?P<content>.+)"\s*$')
def ProcessFile(filename):
    """Scan one DreamMaker source file for runs of consecutive string
    assignments/appends to the same identifier at the same indent depth, and
    write a version with each run collapsed into a single {"..."} multiline
    string block into a parallel "<dir>-fixed" tree.

    :param filename: path of the .dm source file to scan.
    :returns: list of "file:line: code" strings, one per offending line.
    """
    fuckups = []
    with open(filename, 'r') as f:
        lastID = ''            # identifier matched on the previous line
        declaring = False      # NOTE(review): assigned but never read — looks vestigial
        lastLevel = 0          # tab depth of the previous matching line
        lastWasAlert = False   # True while inside a run currently being combined
        buffa = ''             # accumulated rewritten file contents
        tempbuffa = ''         # pending opening line of a candidate run
        tempfuckup = ''        # "file:line: code" for the candidate's first line
        tempBackup = ''        # original text held back until we know no run forms
        origIndentLevel = 0    # indent of the line that opened the current run
        ln = 0                 # current 1-based line number
        for line in f:
            ln += 1
            m = REGEX_TO_COMBINE_AS_BLOCK.match(line)
            if m is not None:
                level = m.group('tabs').count('\t')
                ID = m.group('identifier')
                content = m.group('content').strip()
                indent = '\t' * level
                # indentMore = '\t' * (level + 1)
                if ID == lastID and level == lastLevel:
                    # Continuation of a run: on the first continuation line,
                    # emit the AUTOFIX banner plus the held-back opening line.
                    if not lastWasAlert:
                        buffa += '\n' + indent + '// AUTOFIXED BY fix_string_idiocy.py'
                        buffa += '\n' + indent + '// ' + tempfuckup
                        buffa += '\n' + tempbuffa
                        print(tempfuckup)
                        fuckups.append(tempfuckup)
                    # Record this line and splice its content into the block.
                    msg = '{0}:{1}: {2}'.format(filename, ln, line.strip())
                    print(msg)
                    fuckups.append(msg)
                    buffa += '\n'
                    # buffa += indentMore
                    buffa += content
                    lastWasAlert = True
                else:
                    # Different identifier or depth: close any open run, flush
                    # held-back text, then remember this line as a new candidate.
                    if lastWasAlert:
                        buffa += '"}'
                        buffa += '\n' + ('\t' * origIndentLevel) + '// END AUTOFIX'
                        buffa += '\n'
                        lastWasAlert = False
                    if tempBackup != '':
                        buffa += tempBackup
                    tempBackup = line
                    tempbuffa = indent
                    origIndentLevel = level
                    # Re-emit the line as the opening of a {"..."} block,
                    # preserving any var/ declaration and += operator.
                    if m.group('declaration') is None:
                        tempbuffa += '{0} {2}= {{"{1}'.format(ID, content, m.group('operator'))
                    else:
                        tempbuffa += 'var/{0} {2}= {{"{1}'.format(ID, content, m.group('operator'))
                    tempfuckup = '{0}:{1}: {2}'.format(filename, ln, line.strip())
                    lastID = ID
                    lastLevel = level
            else:
                # Blank lines are buffered so they land after any combined block.
                if line.strip() == '':
                    tempBackup += line
                    continue
                if lastWasAlert:
                    # NOTE(review): 'indent' here is left over from the last
                    # matching line of the run — relies on loop ordering.
                    buffa += '"}'
                    buffa += '\n' + indent + '// END AUTOFIX'
                    buffa += '\n'
                    lastWasAlert = False
                    tempBackup = ''
                if tempBackup != '':
                    buffa += tempBackup
                    tempBackup = ''
                lastID = ''
                lastLevel = ''
                buffa += line
    # Mirror the file path into the parallel "-fixed" tree.
    fixpaths = ['code', 'interface', 'RandomZLevels', '_maps']
    fixpath=filename
    for fp in fixpaths:
        fixpath = fixpath.replace(fp + os.sep, fp + '-fixed' + os.sep)
    if len(fuckups) > 0:
        # Something was combined: write the rewritten file.
        if not os.path.isdir(os.path.dirname(fixpath)):
            os.makedirs(os.path.dirname(fixpath))
        with open(fixpath, 'w') as fixes:
            fixes.write(buffa)
    else:
        # Nothing to fix any more: drop a stale previously-generated file.
        if os.path.isfile(fixpath):
            print('RM {0} ({1})'.format(fixpath,os.sep))
            os.remove(fixpath)
    # print(' Processed - {0} lines.'.format(ln))
    return fuckups
def ProcessFilesFromDME(dmefile='baystation12.dme', ext='.dm'):
    """Run ProcessFile() over every file with extension *ext* referenced by
    the #include lines of the BYOND project file *dmefile*.

    Include paths are extracted by scanning each #include line character by
    character for double-quoted, backslash-separated file names.
    NOTE(review): numFilesTotal/fileFuckups are accumulated but never
    returned, and the stringcounts.csv handle is opened but never written --
    presumably leftovers; confirm before relying on either.
    """
    numFilesTotal = 0
    fileFuckups = {}
    rootdir = os.path.dirname(dmefile)
    with open(os.path.join(rootdir, 'stringcounts.csv'), 'w') as csv:
        with open(dmefile, 'r') as dmeh:
            for line in dmeh:
                if line.startswith('#include'):
                    inString = False
                    # escaped=False
                    filename = ''
                    for c in line:
                        """
                        if c == '\\' and not escaped:
                            escaped = True
                            continue
                        if escaped:
                            if
                            escaped = False
                            continue
                        """
                        if c == '"':
                            # Quote toggles; a closing quote means we have a
                            # complete include path to process.
                            inString = not inString
                            if not inString:
                                filepath = os.path.join(rootdir, filename.replace('\\',os.sep))
                                if filepath.endswith(ext):
                                    # print('Processing {0}...'.format(filepath))
                                    fileFuckups[filepath] = ProcessFile(filepath)
                                    numFilesTotal += 1
                                filename = ''
                            continue
                        else:
                            if inString:
                                filename += c
# Command-line driver: argv[1] is either a directory to scan recursively for
# .dme project files, or a single .dme file; argv[2] is the source extension
# (e.g. '.dm') passed through to ProcessFilesFromDME().
if os.path.isdir(sys.argv[1]):
    for root, _, files in os.walk(sys.argv[1]):
        for filename in files:
            filepath = os.path.join(root, filename)
            if filepath.endswith('.dme'):
                ProcessFilesFromDME(filepath, sys.argv[2])
    sys.exit(0)
if os.path.isfile(sys.argv[1]):
    ProcessFilesFromDME(sys.argv[1], sys.argv[2])
| mit |
eltomello/qutebrowser | tests/unit/keyinput/conftest.py | 8 | 1650 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>:
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""pytest fixtures for tests.keyinput."""
import pytest
from unittest import mock
from qutebrowser.utils import objreg
# Canned key-binding tables used by the fake_keyconfig fixture, keyed by
# mode name; each inner dict maps a key sequence to the command it triggers.
BINDINGS = {'test': {'<Ctrl-a>': 'ctrla',
                     'a': 'a',
                     'ba': 'ba',
                     'ax': 'ax',
                     'ccc': 'ccc',
                     '0': '0'},
            'test2': {'foo': 'bar', '<Ctrl+X>': 'ctrlx'},
            'normal': {'a': 'a', 'ba': 'ba'}}
@pytest.yield_fixture
def fake_keyconfig():
    """Create a mock of a KeyConfiguration and register it into objreg.

    Yields the (mutable) bindings dict so individual tests can adjust the
    bindings; the mock is removed from objreg again on teardown.
    """
    bindings = dict(BINDINGS) # so the bindings can be changed later
    fake_keyconfig = mock.Mock(spec=['get_bindings_for'])
    fake_keyconfig.get_bindings_for.side_effect = lambda s: bindings[s]
    objreg.register('key-config', fake_keyconfig)
    yield bindings
    objreg.delete('key-config')
| gpl-3.0 |
ZJvandeWeg/bugfree-batman | tools/sample_bots/python/ants.py | 3 | 9309 | #!/usr/bin/env python
import sys
import traceback
import random
try:
from sys import maxint
except ImportError:
from sys import maxsize as maxint
# Owner index of our own ants; other non-negative values are enemy players.
MY_ANT = 0
ANTS = 0
# Negative map-cell markers for non-ant terrain/content.
DEAD = -1
LAND = -2
FOOD = -3
WATER = -4
UNSEEN = -5
HILL = -6

# Render characters: player ants 'a'-'j', ants standing on a hill 'A'-'J',
# empty hills '0'-'9', then unseen/dead/water/land/food markers.
PLAYER_ANT = 'abcdefghij'
# NOTE: the original read "HILL_ANT = string = '...'", which also bound a
# stray module-level name ``string``; the redundant chained assignment is
# removed -- ``string`` is referenced nowhere else in this module.
HILL_ANT = 'ABCDEFGHIJ'
PLAYER_HILL = '0123456789'
MAP_OBJECT = '?%*.!'
MAP_RENDER = PLAYER_ANT + HILL_ANT + PLAYER_HILL + MAP_OBJECT

# (row delta, col delta) for each compass direction; row grows southwards.
AIM = {'n': (-1, 0),
       'e': (0, 1),
       's': (1, 0),
       'w': (0, -1)}
# 90-degree right turn, 90-degree left turn, and reversal per direction.
RIGHT = {'n': 'e',
         'e': 's',
         's': 'w',
         'w': 'n'}
LEFT = {'n': 'w',
        'e': 'n',
        's': 'e',
        'w': 's'}
BEHIND = {'n': 's',
          's': 'n',
          'e': 'w',
          'w': 'e'}
class Ants():
    """Game-state container and protocol helper for the Ants AI challenge.

    Parses the engine's line-based text protocol, keeps the map and the
    per-turn ant/food/hill lists up to date, and offers the geometry and
    nearest-X helpers a bot typically needs.
    """
    def __init__(self):
        # Dimensions/map are unknown until setup() parses the engine's
        # 'cols'/'rows' parameters.
        self.width = None
        self.height = None
        self.map = None
        self.ant_list = {}   # (row, col) -> owner player index
        self.food_list = []  # [(row, col), ...]
        self.dead_list = []  # [(row, col), ...]
        self.hill_list = {}  # (row, col) -> owner player index
    def setup(self, data):
        'parse initial input and setup starting game state'
        for line in data.split('\n'):
            line = line.strip().lower()
            if len(line) > 0:
                tokens = line.split()
                key = tokens[0]
                if key == 'cols':
                    self.width = int(tokens[1])
                elif key == 'rows':
                    self.height = int(tokens[1])
                elif key == 'player_seed':
                    random.seed(int(tokens[1]))
                elif key == 'turntime':
                    self.turntime = int(tokens[1])
                elif key == 'loadtime':
                    self.loadtime = int(tokens[1])
                elif key == 'viewradius2':
                    self.viewradius2 = int(tokens[1])
                elif key == 'attackradius2':
                    self.attackradius2 = int(tokens[1])
                elif key == 'spawnradius2':
                    self.spawnradius2 = int(tokens[1])
        # Everything starts as LAND; update() overlays per-turn entities.
        self.map = [[LAND for col in range(self.width)]
                    for row in range(self.height)]
    def update(self, data):
        'reset per-turn entities, then apply one turn of engine input'
        # clear ant and food data
        for (row, col), owner in self.ant_list.items():
            self.map[row][col] = LAND
        self.ant_list = {}
        for row, col in self.food_list:
            self.map[row][col] = LAND
        self.food_list = []
        for row, col in self.dead_list:
            self.map[row][col] = LAND
        self.dead_list = []
        for (row, col), owner in self.hill_list.items():
            self.map[row][col] = LAND
        self.hill_list = {}
        # update map and create new ant and food lists
        for line in data.split('\n'):
            line = line.strip().lower()
            if len(line) > 0:
                tokens = line.split()
                if len(tokens) >= 3:
                    row = int(tokens[1])
                    col = int(tokens[2])
                    if tokens[0] == 'a':
                        owner = int(tokens[3])
                        self.map[row][col] = owner
                        self.ant_list[(row, col)] = owner
                    elif tokens[0] == 'f':
                        self.map[row][col] = FOOD
                        self.food_list.append((row, col))
                    elif tokens[0] == 'w':
                        self.map[row][col] = WATER
                    elif tokens[0] == 'd':
                        # food could spawn on a spot where an ant just died
                        # don't overwrite the space unless it is land
                        if self.map[row][col] == LAND:
                            self.map[row][col] = DEAD
                        # but always add to the dead list
                        self.dead_list.append((row, col))
                    elif tokens[0] == 'h':
                        owner = int(tokens[3])
                        self.hill_list[(row, col)] = owner
    def issue_order(self, order):
        'send one (row, col, direction) move order to the engine'
        sys.stdout.write('o %s %s %s\n' % (order[0], order[1], order[2]))
        sys.stdout.flush()
    def finish_turn(self):
        'tell the engine we are done issuing orders for this turn'
        sys.stdout.write('go\n')
        sys.stdout.flush()
    def my_ants(self):
        'return a list of (row, col) for all of our own ants'
        return [loc for loc, owner in self.ant_list.items()
                    if owner == MY_ANT]
    def enemy_ants(self):
        'return a list of ((row, col), owner) for all visible enemy ants'
        return [(loc, owner) for loc, owner in self.ant_list.items()
                    if owner != MY_ANT]
    def my_hills(self):
        'return a list of (row, col) for our own hills'
        return [loc for loc, owner in self.hill_list.items()
                    if owner == MY_ANT]
    def enemy_hills(self):
        'return a list of ((row, col), owner) for visible enemy hills'
        return [(loc, owner) for loc, owner in self.hill_list.items()
                    if owner != MY_ANT]
    def food(self):
        'return a copy of the current food locations'
        return self.food_list[:]
    def passable(self, row, col):
        'true if the tile is not water (an ant may ever enter it)'
        return self.map[row][col] != WATER
    def unoccupied(self, row, col):
        'true if an ant could stand on this tile right now'
        return self.map[row][col] in (LAND, DEAD, UNSEEN)
    def destination(self, row, col, direction):
        'tile reached by one step in direction; the map wraps around'
        d_row, d_col = AIM[direction]
        return ((row + d_row) % self.height, (col + d_col) % self.width)
    def distance(self, row1, col1, row2, col2):
        'Manhattan distance on the torus (wrap-around) map'
        row1 = row1 % self.height
        row2 = row2 % self.height
        col1 = col1 % self.width
        col2 = col2 % self.width
        d_col = min(abs(col1 - col2), self.width - abs(col1 - col2))
        d_row = min(abs(row1 - row2), self.height - abs(row1 - row2))
        return d_col + d_row
    def direction(self, row1, col1, row2, col2):
        'list of 1-2 directions that move from (row1,col1) towards (row2,col2)'
        d = []
        row1 = row1 % self.height
        row2 = row2 % self.height
        col1 = col1 % self.width
        col2 = col2 % self.width
        # For each axis, pick whichever way around the torus is shorter;
        # both are returned when the distance is exactly half the map.
        if row1 < row2:
            if row2 - row1 >= self.height//2:
                d.append('n')
            if row2 - row1 <= self.height//2:
                d.append('s')
        if row2 < row1:
            if row1 - row2 >= self.height//2:
                d.append('s')
            if row1 - row2 <= self.height//2:
                d.append('n')
        if col1 < col2:
            if col2 - col1 >= self.width//2:
                d.append('w')
            if col2 - col1 <= self.width//2:
                d.append('e')
        if col2 < col1:
            if col1 - col2 >= self.width//2:
                d.append('e')
            if col1 - col2 <= self.width//2:
                d.append('w')
        return d
    def closest_food(self,row1,col1,filter=None):
        #find the closest food from this row/col
        # NOTE: 'filter' (shadows the builtin) is an optional collection of
        # food locations to ignore.
        min_dist=maxint
        closest_food = None
        for food in self.food_list:
            if filter is None or food not in filter:
                dist = self.distance(row1,col1,food[0],food[1])
                if dist<min_dist:
                    min_dist = dist
                    closest_food = food
        return closest_food
    def closest_enemy_ant(self,row1,col1,filter=None):
        #find the closest enemy ant from this row/col
        min_dist=maxint
        closest_ant = None
        for ant in self.enemy_ants():
            if filter is None or ant not in filter:
                dist = self.distance(row1,col1,ant[0][0],ant[0][1])
                if dist<min_dist:
                    min_dist = dist
                    closest_ant = ant[0]
        return closest_ant
    def closest_enemy_hill(self,row1,col1,filter=None):
        #find the closest enemy hill from this row/col
        min_dist=maxint
        closest_hill = None
        for hill in self.enemy_hills():
            if filter is None or hill[0] not in filter:
                dist = self.distance(row1,col1,hill[0][0],hill[0][1])
                if dist<min_dist:
                    min_dist = dist
                    closest_hill = hill[0]
        return closest_hill
    def closest_unseen(self,row1,col1,filter=None):
        #find the closest unseen from this row/col
        # O(rows*cols) scan of the whole map.
        min_dist=maxint
        closest_unseen = None
        for row in range(self.height):
            for col in range(self.width):
                if filter is None or (row, col) not in filter:
                    if self.map[row][col] == UNSEEN:
                        dist = self.distance(row1,col1,row,col)
                        if dist<min_dist:
                            min_dist = dist
                            closest_unseen = (row, col)
        return closest_unseen
    def render_text_map(self):
        'return a human-readable text rendering of the current map'
        tmp = ''
        for row in self.map:
            tmp += '# %s\n' % ''.join([MAP_RENDER[col] for col in row])
        return tmp
    @staticmethod
    def run(bot):
        'main protocol loop: feed engine input to bot.do_turn until EOF'
        ants = Ants()
        map_data = ''
        while(True):
            try:
                current_line = sys.stdin.readline().rstrip('\r\n') # strip new line char
                if current_line.lower() == 'ready':
                    ants.setup(map_data)
                    ants.finish_turn()
                    map_data = ''
                elif current_line.lower() == 'go':
                    ants.update(map_data)
                    bot.do_turn(ants)
                    ants.finish_turn()
                    map_data = ''
                else:
                    map_data += current_line + '\n'
            except EOFError:
                break
            except Exception as e:
                # Surface bot crashes on stderr but exit the loop cleanly.
                traceback.print_exc(file=sys.stderr)
                break
| gpl-2.0 |
c-PRIMED/puq | test/host_test.py | 1 | 5151 | from __future__ import absolute_import, division, print_function
from puq.hosts import Host
from puq.jobqueue import JobQueue
from logging import debug
class TestHost(Host):
    """In-memory stand-in for a PBS-style batch host, used to exercise
    JobQueue without a real scheduler.

    submit() never talks to a queueing system; it appends a record of what
    would have been submitted to the module-global ``output`` list, which
    the test functions below assert against.
    """

    def __init__(self, cpus=0, cpus_per_node=0, walltime='1:00:00', pack=1):
        Host.__init__(self)
        if cpus <= 0:
            # message fixed: this class is TestHost, not PBSHost
            print("You must specify cpus when creating a TestHost object.")
            raise ValueError()
        if cpus_per_node <= 0:
            print("You must specify cpus_per_node when creating a TestHost object.")
            raise ValueError()
        self.cpus = cpus
        self.cpus_per_node = cpus_per_node
        self.walltime = walltime
        self.jobs = []
        self.wqueue = []
        self.wlist = []
        self.pack = pack
        self.scaling = False
        self.jnum = 0  # monotonically increasing submission counter

    @staticmethod
    def job_status(j):
        # Every polled job is immediately reported finished.
        j['status'] = 'F'

    def add_job(self, cmd, dir, cpu, outfile):
        """Queue one job description; cpu == 0 means 'use all host cpus'."""
        if cpu == 0:
            cpu = self.cpus
        else:
            self.scaling = True
        num = len(self.jobs)
        self.jobs.append({'num': num,
                          'cmd': cmd,
                          'cpu': cpu,
                          'dir': dir,
                          'outfile': outfile,
                          'status': 0,
                          'job': '',
                          'secs': 0,
                          'walltime': self.walltime})

    def check(self, pbsjob):
        """
        Returns the status of PBS jobs.
        'F' = Finished
        'Q' = Queued
        'R' = Running
        'U' = Unknown
        """
        pbsjob['job_state'] = 'F'

    def submit(self, cmd, joblist, walltime):
        """Record a fake submission in the global ``output`` list, mark all
        jobs in *joblist* queued, and return the submission descriptor."""
        global output
        cpu = joblist[0]['cpu']
        cpn = self.cpus_per_node
        # Ceiling division; equivalent to the original int((cpu+cpn-1)/cpn)
        # for the positive values used here, but exact integer arithmetic.
        nodes = (cpu + cpn - 1) // cpn
        walltime = self.secs_to_walltime(walltime)
        # NOTE: the original dict literal had two 'cpu' keys (cpu, then cpn);
        # the second silently won, so only cpus_per_node was ever recorded.
        # The dead duplicate is removed; recorded behavior is unchanged.
        output.append({'job': self.jnum,
                       'cpu': cpn,
                       'nodes': nodes,
                       'walltime': walltime,
                       'cmd': cmd})
        job = joblist[0]['num'] + 100
        for j in joblist:
            j['job'] = job
            j['status'] = 'Q'
        d = {'jnum': self.jnum, 'joblist': joblist, 'jobid': job}
        self.jnum += 1
        return d

    def run(self):
        """Drain all pending jobs through a JobQueue; True if none errored."""
        jobq = JobQueue(self, limit=10, polltime=1)
        for j in self.jobs:
            if j['status'] == 0 or j['status'] == 'Q':
                debug("adding job %s" % j['num'])
                jobq.add(j)
        jobq.start()
        return jobq.join() == []
def test_Host0():
    # One job on a 1-cpu/1-per-node host: exactly one submission, using the
    # full host walltime normalized from '10:00' to 'H:MM:SS'.
    global output
    output = []
    th = TestHost(cpus=1, cpus_per_node=1, walltime='10:00', pack=1)
    th.add_job('foobar', '', 0, 'xxx')
    th.run()
    assert len(output) == 1
    assert output[0]['walltime'] == '0:10:00'
def test_Host1():
    # Four jobs with pack=1: each job becomes its own submission.
    global output
    output = []
    th = TestHost(cpus=1, cpus_per_node=1, walltime='10:00', pack=1)
    th.add_job('foobar -1', '', 0, 'xxx')
    th.add_job('foobar -2', '', 0, 'xxx')
    th.add_job('foobar -3', '', 0, 'xxx')
    th.add_job('foobar -4', '', 0, 'xxx')
    th.run()
    assert len(output) == 4
    assert output[0]['walltime'] == '0:10:00'
def test_Host2():
    # Four jobs with pack=4: all packed into a single submission whose
    # walltime is 4x the per-job walltime.
    global output
    output = []
    th = TestHost(cpus=1, cpus_per_node=1, walltime='10:00', pack=4)
    th.add_job('foobar -1', '', 0, 'xxx')
    th.add_job('foobar -2', '', 0, 'xxx')
    th.add_job('foobar -3', '', 0, 'xxx')
    th.add_job('foobar -4', '', 0, 'xxx')
    th.run()
    assert len(output) == 1
    assert output[0]['walltime'] == '0:40:00'
def test_Host3():
    # Eleven 2-cpu jobs on 4-cpu nodes with pack=1 -> 6 submissions
    # (presumably the queue pairs two jobs per node; verify against
    # JobQueue's grouping behavior).
    global output
    output = []
    th = TestHost(cpus=2, cpus_per_node=4, walltime='10:00', pack=1)
    for i in range(11):
        th.add_job('foobar', '', 0, 'xxx')
    th.run()
    assert len(output) == 6
    assert output[0]['walltime'] == '0:10:00'
def test_Host4():
    # Same eleven jobs with pack=3 -> only 2 submissions, each with
    # 3x the per-job walltime.
    global output
    output = []
    th = TestHost(cpus=2, cpus_per_node=4, walltime='10:00', pack=3)
    for i in range(11):
        th.add_job('foobar', '', 0, 'xxx')
    th.run()
    assert len(output) == 2
    assert output[0]['walltime'] == '0:30:00'
def test_Host5():
    # Jobs wider than one node: 22 cpus over 4-cpu nodes needs
    # ceil(22/4) = 6 nodes per submission. Note output's 'cpu' field
    # records cpus_per_node (4), not the job's 22 cpus -- see
    # TestHost.submit().
    global output
    output = []
    th = TestHost(cpus=22, cpus_per_node=4, walltime='10:00', pack=1)
    th.add_job('foobar', '', 0, 'xxx')
    th.add_job('foobar', '', 0, 'xxx')
    th.run()
    assert len(output) == 2
    assert output[0]['walltime'] == '0:10:00'
    assert output[1]['walltime'] == '0:10:00'
    assert output[0]['nodes'] == 6
    assert output[1]['nodes'] == 6
    assert output[0]['cpu'] == 4
    assert output[1]['cpu'] == 4
def test_HostMultiRun():
    # Smoke test: run() can be invoked again after queueing more jobs on
    # the same host; no assertions, just must not raise.
    global output
    output = []
    th = TestHost(cpus=1, cpus_per_node=1, walltime='10:00', pack=1)
    th.add_job('foobar -1', '', 0, 'xxx')
    th.add_job('foobar -2', '', 0, 'xxx')
    th.add_job('foobar -3', '', 0, 'xxx')
    th.add_job('foobar -4', '', 0, 'xxx')
    th.run()
    print('-' * 80)
    th.add_job('foobar -5', '', 0, 'xxx')
    th.add_job('foobar -6', '', 0, 'xxx')
    th.add_job('foobar -7', '', 0, 'xxx')
    th.add_job('foobar -8', '', 0, 'xxx')
    th.run()
    print(output)
| mit |
biziwalker/mtasa | vendor/google-breakpad/src/tools/gyp/test/linux/gyptest-implicit-rpath.py | 252 | 1172 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the implicit rpath is added only when needed.
"""
import TestGyp
import re
import subprocess
import sys
# Only meaningful on Linux: the test inspects ELF DT_RPATH entries via
# readelf for binaries produced by the ninja and make generators.
if sys.platform.startswith('linux'):
  test = TestGyp.TestGyp(formats=['ninja', 'make'])
  CHDIR = 'implicit-rpath'
  test.run_gyp('test.gyp', chdir=CHDIR)
  test.build('test.gyp', test.ALL, chdir=CHDIR)
  def GetRpaths(p):
    # Return every "Library rpath: [...]" value readelf -d reports for the
    # built file p. (NOTE(review): communicate() returns bytes on Python 3;
    # this matching assumes Python 2 str output.)
    p = test.built_file_path(p, chdir=CHDIR)
    r = re.compile(r'Library rpath: \[([^\]]+)\]')
    proc = subprocess.Popen(['readelf', '-d', p], stdout=subprocess.PIPE)
    o = proc.communicate()[0]
    assert not proc.returncode
    return r.findall(o)
  # Each generator keeps shared libraries in a different output subdirectory,
  # so the expected implicit rpath differs per format.
  if test.format == 'ninja':
    expect = '$ORIGIN/lib/'
  elif test.format == 'make':
    expect = '$ORIGIN/lib.target/'
  else:
    test.fail_test()
  if GetRpaths('shared_executable') != [expect]:
    test.fail_test()
  if GetRpaths('shared_executable_no_so_suffix') != [expect]:
    test.fail_test()
  # A statically linked executable must not get any implicit rpath.
  if GetRpaths('static_executable'):
    test.fail_test()
  test.pass_test()
| gpl-3.0 |
mszewczy/odoo | openerp/report/render/rml2pdf/customfonts.py | 261 | 3493 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 P. Christeas, Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP SA. (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab import rl_config
import logging
import glob
import os
# .apidoc title: TTF Font Table
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones)
should have the same filenames, only need the code below).
Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
CustomTTFonts = []
# Search path for TTF files, in addition of rl_config.TTFSearchPath
TTFSearchPath = [
'/usr/share/fonts/truetype', # SuSE
'/usr/share/fonts/dejavu', '/usr/share/fonts/liberation', # Fedora, RHEL
'/usr/share/fonts/truetype/*','/usr/local/share/fonts' # Ubuntu,
'/usr/share/fonts/TTF/*', # Mandriva/Mageia
'/usr/share/fonts/TTF', # Arch Linux
'/usr/lib/openoffice/share/fonts/truetype/',
'~/.fonts',
'~/.local/share/fonts',
# mac os X - from
# http://developer.apple.com/technotes/tn/tn2024.html
'~/Library/Fonts',
'/Library/Fonts',
'/Network/Library/Fonts',
'/System/Library/Fonts',
# windows
'c:/winnt/fonts',
'c:/windows/fonts'
]
def list_all_sysfonts():
    """Collect candidate TrueType font files from the system.

    Scans both our extra ``TTFSearchPath`` entries and reportlab's own
    ``rl_config.TTFSearchPath`` (deduplicated), expanding ``~`` in each
    directory, and returns every *.ttf/*.ttc file found.
    """
    # We search ourselves rather than relying on reportlab's TTFOpenFile,
    # which is not very good at it.
    directories = set(TTFSearchPath) | set(rl_config.TTFSearchPath)
    pattern = '*.[Tt][Tt][FfCc]'
    return [found
            for dirname in directories
            for found in glob.glob(os.path.join(os.path.expanduser(dirname), pattern))]
def SetCustomFonts(rmldoc):
    """Register the fonts from ``CustomTTFonts`` on *rmldoc*.

    Maps font names to their TTF files (the TTF may carry a different
    name, e.g. Times -> Liberation Serif). Called once per report, so any
    system-wide discovery should be cached elsewhere, not done here.
    """
    for family, font, filename, mode in CustomTTFonts:
        # Only absolute paths to files that actually exist are registered.
        if not os.path.isabs(filename):
            continue
        if not os.path.exists(filename):
            continue
        rmldoc.setTTFontMapping(family, font, filename, mode)
    return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
juja256/des | perm.py | 1 | 1457 | from bitarray import *
class Permutation:
    """A bit-position permutation built from a 1-based index table (as found
    in DES permutation tables), applied to bitarray/str bit blocks.

    Indices are stored internally 0-based.
    """
    def __init__(self, l):
        # if sorted(l) != range(1, len(l)+1):
        #     raise ValueError("List is not valid!")
        # Convert the 1-based table to 0-based positions.
        self.__bare = [i - 1 for i in l]

    def Get(self):
        """Return the internal 0-based index list."""
        return self.__bare

    def Reverse(self):
        """Return the inverse permutation as a new Permutation."""
        rev = [0] * len(self.__bare)
        for i in range(0, len(self.__bare)):
            # Build a 1-based table; Permutation() converts back to 0-based.
            rev[self.__bare[i]] = i + 1
        return Permutation(rev)

    def Substitude(self, msg):
        """
        Substitudes all bits in input message

        The message is processed in blocks of len(self) bits; within each
        block, output bit i takes input bit self[i].
        Raises ValueError for inputs that are neither str nor bitarray.
        """
        bits = bitarray()
        if isinstance(msg, str):
            bits.frombytes(msg)
        elif isinstance(msg, bitarray):
            bits = msg
        else:
            raise ValueError("Not valid type of input data")
        res = bitarray(bits.length() * [0])
        size = len(self.__bare)
        for i in range(0, bits.length()):
            # BUGFIX: use floor division; with '/' this index became a float
            # under Python 3 (identical result for ints on Python 2).
            res[i] = bits[(i // size) * size + self.__bare[i % size]]
        return res

    def Reduce(self, block, size):
        """
        Shrinks or extends block to specified size with permutation

        Output bit i takes input bit self[i], for i in range(size).
        Raises ValueError for inputs that are neither str nor bitarray.
        """
        bits = bitarray()
        if isinstance(block, str):
            bits.frombytes(block)
        elif isinstance(block, bitarray):
            bits = block
        else:
            raise ValueError("Not valid type of input data")
        res = bitarray(size * [0])
        for i in range(0, size):
            res[i] = bits[self.__bare[i]]
        return res
xuxiao19910803/edx | lms/djangoapps/bulk_email/migrations/0003_add_optout_user.py | 182 | 6032 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration: replace Optout's (course_id, email) unique
    pairing with a (course_id, user) foreign-key pairing."""
    def forwards(self, orm):
        # Adding field 'Optout.user'
        db.add_column('bulk_email_optout', 'user',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True),
                      keep_default=False)
        # Removing unique constraint on 'Optout', fields ['course_id', 'email']
        db.delete_unique('bulk_email_optout', ['course_id', 'email'])
        # Adding unique constraint on 'Optout', fields ['course_id', 'user']
        db.create_unique('bulk_email_optout', ['course_id', 'user_id'])
    def backwards(self, orm):
        # Removing unique constraint on 'Optout', fields ['course_id', 'user']
        db.delete_unique('bulk_email_optout', ['course_id', 'user_id'])
        # Deleting field 'Optout.user' (comment fixed: this drops the
        # user_id column added in forwards(), not the email field)
        db.delete_column('bulk_email_optout', 'user_id')
        # Creating unique constraint on 'Optout', fields ['course_id', 'email']
        db.create_unique('bulk_email_optout', ['course_id', 'email'])
    # Frozen ORM state used by South to construct the 'orm' argument above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'bulk_email.courseemail': {
            'Meta': {'object_name': 'CourseEmail'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
        },
        'bulk_email.optout': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['bulk_email']
| agpl-3.0 |
flyingk/mavlink | pymavlink/generator/mavgen_python.py | 14 | 23904 | #!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a python implementation
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
import sys, textwrap, os
from . import mavparse, mavtemplate
t = mavtemplate.MAVTemplate()
def generate_preamble(outf, msgs, basename, args, xml):
    """Write the fixed preamble of the generated MAVLink dialect module:
    header docstring, protocol/dialect constants, the optional mavnative
    import, base type constants, and the MAVLink_header / MAVLink_message
    base classes. ${...} placeholders are substituted by the module-level
    MAVTemplate ``t`` from the mapping passed below.
    """
    print("Generating preamble")
    t.write(outf, """
'''
MAVLink protocol implementation (auto-generated by mavgen.py)
Generated from: ${FILELIST}
Note: this file has been auto-generated. DO NOT EDIT
'''
import struct, array, time, json, os, sys, platform
from ...generator.mavcrc import x25crc
WIRE_PROTOCOL_VERSION = "${WIRE_PROTOCOL_VERSION}"
DIALECT = "${DIALECT}"
native_supported = platform.system() != 'Windows' # Not yet supported on other dialects
native_force = 'MAVNATIVE_FORCE' in os.environ # Will force use of native code regardless of what client app wants
native_testing = 'MAVNATIVE_TESTING' in os.environ # Will force both native and legacy code to be used and their results compared
if native_supported:
    try:
        import mavnative
    except ImportError:
        print("ERROR LOADING MAVNATIVE - falling back to python implementation")
        native_supported = False
# some base types from mavlink_types.h
MAVLINK_TYPE_CHAR     = 0
MAVLINK_TYPE_UINT8_T  = 1
MAVLINK_TYPE_INT8_T   = 2
MAVLINK_TYPE_UINT16_T = 3
MAVLINK_TYPE_INT16_T  = 4
MAVLINK_TYPE_UINT32_T = 5
MAVLINK_TYPE_INT32_T  = 6
MAVLINK_TYPE_UINT64_T = 7
MAVLINK_TYPE_INT64_T  = 8
MAVLINK_TYPE_FLOAT    = 9
MAVLINK_TYPE_DOUBLE   = 10
class MAVLink_header(object):
    '''MAVLink message header'''
    def __init__(self, msgId, mlen=0, seq=0, srcSystem=0, srcComponent=0):
        self.mlen = mlen
        self.seq = seq
        self.srcSystem = srcSystem
        self.srcComponent = srcComponent
        self.msgId = msgId
    def pack(self):
        return struct.pack('BBBBBB', ${PROTOCOL_MARKER}, self.mlen, self.seq,
                          self.srcSystem, self.srcComponent, self.msgId)
class MAVLink_message(object):
    '''base MAVLink message class'''
    def __init__(self, msgId, name):
        self._header     = MAVLink_header(msgId)
        self._payload    = None
        self._msgbuf     = None
        self._crc        = None
        self._fieldnames = []
        self._type       = name
    def get_msgbuf(self):
        if isinstance(self._msgbuf, bytearray):
            return self._msgbuf
        return bytearray(self._msgbuf)
    def get_header(self):
        return self._header
    def get_payload(self):
        return self._payload
    def get_crc(self):
        return self._crc
    def get_fieldnames(self):
        return self._fieldnames
    def get_type(self):
        return self._type
    def get_msgId(self):
        return self._header.msgId
    def get_srcSystem(self):
        return self._header.srcSystem
    def get_srcComponent(self):
        return self._header.srcComponent
    def get_seq(self):
        return self._header.seq
    def __str__(self):
        ret = '%s {' % self._type
        for a in self._fieldnames:
            v = getattr(self, a)
            ret += '%s : %s, ' % (a, v)
        ret = ret[0:-2] + '}'
        return ret
    def __ne__(self, other):
        return not self.__eq__(other)
    def __eq__(self, other):
        if other == None:
            return False
        if self.get_type() != other.get_type():
            return False
        # We do not compare CRC because native code doesn't provide it
        #if self.get_crc() != other.get_crc():
        #    return False
        if self.get_seq() != other.get_seq():
            return False
        if self.get_srcSystem() != other.get_srcSystem():
            return False
        if self.get_srcComponent() != other.get_srcComponent():
            return False
        for a in self._fieldnames:
            if getattr(self, a) != getattr(other, a):
                return False
        return True
    def to_dict(self):
        d = dict({})
        d['mavpackettype'] = self._type
        for a in self._fieldnames:
            d[a] = getattr(self, a)
        return d
    def to_json(self):
        return json.dumps(self.to_dict())
    def pack(self, mav, crc_extra, payload):
        self._payload = payload
        self._header  = MAVLink_header(self._header.msgId, len(payload), mav.seq,
                                       mav.srcSystem, mav.srcComponent)
        self._msgbuf = self._header.pack() + payload
        crc = x25crc(self._msgbuf[1:])
        if ${crc_extra}: # using CRC extra
            crc.accumulate_str(struct.pack('B', crc_extra))
        self._crc = crc.crc
        self._msgbuf += struct.pack('<H', self._crc)
        return self._msgbuf
""", {'FILELIST' : ",".join(args),
      'PROTOCOL_MARKER' : xml.protocol_marker,
      'DIALECT' : os.path.splitext(os.path.basename(basename))[0],
      'crc_extra' : xml.crc_extra,
      'WIRE_PROTOCOL_VERSION' : xml.wire_protocol_version })
def generate_enums(outf, enums):
    """Emit the EnumEntry helper class plus, for every enum, module-level
    value constants and an ``enums[name][value]`` lookup table including
    per-entry parameter descriptions."""
    print("Generating enums")
    outf.write('''
# enums
class EnumEntry(object):
    def __init__(self, name, description):
        self.name = name
        self.description = description
        self.param = {}
enums = {}
''')
    # Long descriptions are wrapped; continuation lines become trailing
    # '#' comments aligned after the constant.
    wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent="                        # ")
    for e in enums:
        outf.write("\n# %s\n" % e.name)
        outf.write("enums['%s'] = {}\n" % e.name)
        for entry in e.entry:
            outf.write("%s = %u # %s\n" % (entry.name, entry.value, wrapper.fill(entry.description)))
            outf.write("enums['%s'][%d] = EnumEntry('%s', '''%s''')\n" % (e.name,
                        int(entry.value), entry.name,
                        entry.description))
            for param in entry.param:
                outf.write("enums['%s'][%d].param[%d] = '''%s'''\n" % (e.name,
                        int(entry.value),
                        int(param.index),
                        param.description))
def generate_message_ids(outf, msgs):
    """Write the MAVLINK_MSG_ID_* constant for every message, plus the
    sentinel BAD_DATA id (-1)."""
    print("Generating message IDs")
    lines = ["\n# message IDs\n", "MAVLINK_MSG_ID_BAD_DATA = -1\n"]
    lines.extend("MAVLINK_MSG_ID_%s = %u\n" % (m.name.upper(), m.id) for m in msgs)
    outf.write("".join(lines))
def generate_classes(outf, msgs):
    """Emit one MAVLink_<name>_message subclass per message: class-level
    metadata (field names, struct format, crc_extra, ...), an __init__
    storing each field, and a pack() that serializes the ordered fields."""
    print("Generating class definitions")
    # Wraps message descriptions to sit inside the generated class docstring
    # (8-space indented class bodies).
    wrapper = textwrap.TextWrapper(initial_indent="        ", subsequent_indent="        ")
    for m in msgs:
        classname = "MAVLink_%s_message" % m.name.lower()
        fieldname_str = ", ".join(map(lambda s: "'%s'" % s, m.fieldnames))
        ordered_fieldname_str = ", ".join(map(lambda s: "'%s'" % s, m.ordered_fieldnames))
        outf.write("""
class %s(MAVLink_message):
        '''
%s
        '''
        id = MAVLINK_MSG_ID_%s
        name = '%s'
        fieldnames = [%s]
        ordered_fieldnames = [ %s ]
        format = '%s'
        native_format = bytearray('%s', 'ascii')
        orders = %s
        lengths = %s
        array_lengths = %s
        crc_extra = %s
        def __init__(self""" % (classname, wrapper.fill(m.description.strip()),
                                m.name.upper(),
                                m.name.upper(),
                                fieldname_str,
                                ordered_fieldname_str,
                                m.fmtstr,
                                m.native_fmtstr,
                                m.order_map,
                                m.len_map,
                                m.array_len_map,
                                m.crc_extra))
        # One __init__ parameter per field, appended to the open signature.
        if len(m.fields) != 0:
            outf.write(", " + ", ".join(m.fieldnames))
        outf.write("):\n")
        outf.write("                MAVLink_message.__init__(self, %s.id, %s.name)\n" % (classname, classname))
        outf.write("                self._fieldnames = %s.fieldnames\n" % (classname))
        for f in m.fields:
            outf.write("                self.%s = %s\n" % (f.name, f.name))
        outf.write("""
        def pack(self, mav):
                return MAVLink_message.pack(self, mav, %u, struct.pack('%s'""" % (m.crc_extra, m.fmtstr))
        # Non-char arrays are flattened element by element for struct.pack;
        # everything else packs as a single argument.
        for field in m.ordered_fields:
            if (field.type != "char" and field.array_length > 1):
                for i in range(field.array_length):
                    outf.write(", self.{0:s}[{1:d}]".format(field.name,i))
            else:
                outf.write(", self.{0:s}".format(field.name))
        outf.write("))\n")
def native_mavfmt(field):
    '''work out the struct format character for a field type, in the form
    expected by the mavnative C extension.

    Differs from mavfmt() in that the mavlink_version marker byte gets its
    own code 'v' and array lengths are not encoded here.
    '''
    # Lookup table renamed from 'map' to avoid shadowing the builtin.
    conv = {
        'float'    : 'f',
        'double'   : 'd',
        'char'     : 'c',
        'int8_t'   : 'b',
        'uint8_t'  : 'B',
        'uint8_t_mavlink_version' : 'v',
        'int16_t'  : 'h',
        'uint16_t' : 'H',
        'int32_t'  : 'i',
        'uint32_t' : 'I',
        'int64_t'  : 'q',
        'uint64_t' : 'Q',
        }
    return conv[field.type]
def mavfmt(field):
    '''work out the struct format for a type

    Array fields are prefixed with their length; char arrays pack as a
    fixed-size string ('<n>s'). The mavlink_version marker packs as a
    plain uint8 here (compare native_mavfmt()).
    '''
    # Lookup table renamed from 'map' to avoid shadowing the builtin.
    conv = {
        'float'    : 'f',
        'double'   : 'd',
        'char'     : 'c',
        'int8_t'   : 'b',
        'uint8_t'  : 'B',
        'uint8_t_mavlink_version' : 'B',
        'int16_t'  : 'h',
        'uint16_t' : 'H',
        'int32_t'  : 'i',
        'uint32_t' : 'I',
        'int64_t'  : 'q',
        'uint64_t' : 'Q',
        }
    if field.array_length:
        if field.type == 'char':
            return str(field.array_length)+'s'
        return str(field.array_length)+conv[field.type]
    return conv[field.type]
def generate_mavlink_class(outf, msgs, xml):
    '''Write the MAVLink protocol-handling boilerplate (MAVError, MAVString,
    MAVLink_bad_data, MAVLink) plus the message-id -> class lookup table
    into the generated python module.'''
    print("Generating MAVLink class")
    # lookup table used by the generated decoder: message id -> message class
    outf.write("\n\nmavlink_map = {\n");
    for m in msgs:
        outf.write("        MAVLINK_MSG_ID_%s : MAVLink_%s_message,\n" % (m.name.upper(), m.name.lower()))
    outf.write("}\n\n")
    # Everything below is a template copied verbatim into the generated
    # module; ${...} placeholders are substituted by t.write() from the xml
    # object.  Do not add comments inside the string - they would end up in
    # the generated file.
    t.write(outf, """
class MAVError(Exception):
    '''MAVLink error class'''
    def __init__(self, msg):
        Exception.__init__(self, msg)
        self.message = msg
class MAVString(str):
    '''NUL terminated string'''
    def __init__(self, s):
        str.__init__(self)
    def __str__(self):
        i = self.find(chr(0))
        if i == -1:
            return self[:]
        return self[0:i]
class MAVLink_bad_data(MAVLink_message):
    '''
    a piece of bad data in a mavlink stream
    '''
    def __init__(self, data, reason):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_BAD_DATA, 'BAD_DATA')
        self._fieldnames = ['data', 'reason']
        self.data = data
        self.reason = reason
        self._msgbuf = data
    def __str__(self):
        '''Override the __str__ function from MAVLink_messages because non-printable characters are common in to be the reason for this message to exist.'''
        return '%s {%s, data:%s}' % (self._type, self.reason, [('%x' % ord(i) if isinstance(i, str) else '%x' % i) for i in self.data])
class MAVLink(object):
    '''MAVLink protocol handling class'''
    def __init__(self, file, srcSystem=0, srcComponent=0, use_native=False):
        self.seq = 0
        self.file = file
        self.srcSystem = srcSystem
        self.srcComponent = srcComponent
        self.callback = None
        self.callback_args = None
        self.callback_kwargs = None
        self.send_callback = None
        self.send_callback_args = None
        self.send_callback_kwargs = None
        self.buf = bytearray()
        self.expected_length = 8
        self.have_prefix_error = False
        self.robust_parsing = False
        self.protocol_marker = ${protocol_marker}
        self.little_endian = ${little_endian}
        self.crc_extra = ${crc_extra}
        self.sort_fields = ${sort_fields}
        self.total_packets_sent = 0
        self.total_bytes_sent = 0
        self.total_packets_received = 0
        self.total_bytes_received = 0
        self.total_receive_errors = 0
        self.startup_time = time.time()
        if native_supported and (use_native or native_testing or native_force):
            print("NOTE: mavnative is currently beta-test code")
            self.native = mavnative.NativeConnection(MAVLink_message, mavlink_map)
        else:
            self.native = None
        if native_testing:
            self.test_buf = bytearray()
    def set_callback(self, callback, *args, **kwargs):
        self.callback = callback
        self.callback_args = args
        self.callback_kwargs = kwargs
    def set_send_callback(self, callback, *args, **kwargs):
        self.send_callback = callback
        self.send_callback_args = args
        self.send_callback_kwargs = kwargs
    def send(self, mavmsg):
        '''send a MAVLink message'''
        buf = mavmsg.pack(self)
        self.file.write(buf)
        self.seq = (self.seq + 1) % 256
        self.total_packets_sent += 1
        self.total_bytes_sent += len(buf)
        if self.send_callback:
            self.send_callback(mavmsg, *self.send_callback_args, **self.send_callback_kwargs)
    def bytes_needed(self):
        '''return number of bytes needed for next parsing stage'''
        if self.native:
            ret = self.native.expected_length - len(self.buf)
        else:
            ret = self.expected_length - len(self.buf)
        if ret <= 0:
            return 1
        return ret
    def __parse_char_native(self, c):
        '''this method exists only to see in profiling results'''
        m = self.native.parse_chars(c)
        return m
    def __callbacks(self, msg):
        '''this method exists only to make profiling results easier to read'''
        if self.callback:
            self.callback(msg, *self.callback_args, **self.callback_kwargs)
    def parse_char(self, c):
        '''input some data bytes, possibly returning a new message'''
        self.buf.extend(c)
        self.total_bytes_received += len(c)
        if self.native:
            if native_testing:
                self.test_buf.extend(c)
                m = self.__parse_char_native(self.test_buf)
                m2 = self.__parse_char_legacy()
                if m2 != m:
                    print("Native: %s\\nLegacy: %s\\n" % (m, m2))
                    raise Exception('Native vs. Legacy mismatch')
            else:
                m = self.__parse_char_native(self.buf)
        else:
            m = self.__parse_char_legacy()
        if m != None:
            self.total_packets_received += 1
            self.__callbacks(m)
        return m
    def __parse_char_legacy(self):
        '''input some data bytes, possibly returning a new message (uses no native code)'''
        if len(self.buf) >= 1 and self.buf[0] != ${protocol_marker}:
            magic = self.buf[0]
            self.buf = self.buf[1:]
            if self.robust_parsing:
                m = MAVLink_bad_data(chr(magic), "Bad prefix")
                self.expected_length = 8
                self.total_receive_errors += 1
                return m
            if self.have_prefix_error:
                return None
            self.have_prefix_error = True
            self.total_receive_errors += 1
            raise MAVError("invalid MAVLink prefix '%s'" % magic)
        self.have_prefix_error = False
        if len(self.buf) >= 2:
            if sys.version_info[0] < 3:
                (magic, self.expected_length) = struct.unpack('BB', str(self.buf[0:2])) # bytearrays are not supported in py 2.7.3
            else:
                (magic, self.expected_length) = struct.unpack('BB', self.buf[0:2])
            self.expected_length += 8
        if self.expected_length >= 8 and len(self.buf) >= self.expected_length:
            mbuf = array.array('B', self.buf[0:self.expected_length])
            self.buf = self.buf[self.expected_length:]
            self.expected_length = 8
            if self.robust_parsing:
                try:
                    m = self.decode(mbuf)
                except MAVError as reason:
                    m = MAVLink_bad_data(mbuf, reason.message)
                    self.total_receive_errors += 1
            else:
                m = self.decode(mbuf)
            return m
        return None
    def parse_buffer(self, s):
        '''input some data bytes, possibly returning a list of new messages'''
        m = self.parse_char(s)
        if m is None:
            return None
        ret = [m]
        while True:
            m = self.parse_char("")
            if m is None:
                return ret
            ret.append(m)
        return ret
    def decode(self, msgbuf):
        '''decode a buffer as a MAVLink message'''
        # decode the header
        try:
            magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('cBBBBB', msgbuf[:6])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
        if ord(magic) != ${protocol_marker}:
            raise MAVError("invalid MAVLink prefix '%s'" % magic)
        if mlen != len(msgbuf)-8:
            raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u' % (len(msgbuf)-8, mlen, msgId))
        if not msgId in mavlink_map:
            raise MAVError('unknown MAVLink message ID %u' % msgId)
        # decode the payload
        type = mavlink_map[msgId]
        fmt = type.format
        order_map = type.orders
        len_map = type.lengths
        crc_extra = type.crc_extra
        # decode the checksum
        try:
            crc, = struct.unpack('<H', msgbuf[-2:])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
        crcbuf = msgbuf[1:-2]
        if ${crc_extra}: # using CRC extra
            crcbuf.append(crc_extra)
        crc2 = x25crc(crcbuf)
        if crc != crc2.crc:
            raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))
        try:
            t = struct.unpack(fmt, msgbuf[6:-2])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
                    type, fmt, len(msgbuf[6:-2]), emsg))
        tlist = list(t)
        # handle sorted fields
        if ${sort_fields}:
            t = tlist[:]
            if sum(len_map) == len(len_map):
                # message has no arrays in it
                for i in range(0, len(tlist)):
                    tlist[i] = t[order_map[i]]
            else:
                # message has some arrays
                tlist = []
                for i in range(0, len(order_map)):
                    order = order_map[i]
                    L = len_map[order]
                    tip = sum(len_map[:order])
                    field = t[tip]
                    if L == 1 or isinstance(field, str):
                        tlist.append(field)
                    else:
                        tlist.append(t[tip:(tip + L)])
        # terminate any strings
        for i in range(0, len(tlist)):
            if isinstance(tlist[i], str):
                tlist[i] = str(MAVString(tlist[i]))
        t = tuple(tlist)
        # construct the message object
        try:
            m = type(*t)
        except Exception as emsg:
            raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
        m._msgbuf = msgbuf
        m._payload = msgbuf[6:-2]
        m._crc = crc
        m._header = MAVLink_header(msgId, mlen, seq, srcSystem, srcComponent)
        return m
""", xml)
def generate_methods(outf, msgs):
    '''Write per-message <name>_encode() and <name>_send() convenience
    methods for the generated MAVLink class, one pair per message type.'''
    print("Generating methods")
    def field_descriptions(fields):
        # build one "name : description (type)" line per field for the
        # generated method docstrings
        ret = ""
        for f in fields:
            ret += "                %-18s        : %s (%s)\n" % (f.name, f.description.strip(), f.type)
        return ret
    wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent="                ")
    for m in msgs:
        comment = "%s\n\n%s" % (wrapper.fill(m.description.strip()), field_descriptions(m.fields))
        # build the parameter list; fields with omit_arg get their constant
        # value as a default
        selffieldnames = 'self, '
        for f in m.fields:
            if f.omit_arg:
                selffieldnames += '%s=%s, ' % (f.name, f.const_value)
            else:
                selffieldnames += '%s, ' % f.name
        selffieldnames = selffieldnames[:-2]
        sub = {'NAMELOWER' : m.name.lower(),
               'SELFFIELDNAMES' : selffieldnames,
               'COMMENT' : comment,
               'FIELDNAMES' : ", ".join(m.fieldnames)}
        # templates below are emitted verbatim; ${...} is substituted from sub
        t.write(outf, """
    def ${NAMELOWER}_encode(${SELFFIELDNAMES}):
        '''
        ${COMMENT}
        '''
        return MAVLink_${NAMELOWER}_message(${FIELDNAMES})
""", sub)
        t.write(outf, """
    def ${NAMELOWER}_send(${SELFFIELDNAMES}):
        '''
        ${COMMENT}
        '''
        return self.send(self.${NAMELOWER}_encode(${FIELDNAMES}))
""", sub)
def generate(basename, xml):
    '''generate complete python implementation

    basename : output filename (".py" appended if missing)
    xml      : list of parsed XML definition objects; messages and enums
               from all of them are merged into one module
    '''
    if basename.endswith('.py'):
        filename = basename
    else:
        filename = basename + '.py'

    msgs = []
    enums = []
    filelist = []
    for x in xml:
        msgs.extend(x.message)
        enums.extend(x.enum)
        filelist.append(os.path.basename(x.filename))

    for m in msgs:
        # endianness of the first XML definition applies to every message
        if xml[0].little_endian:
            m.fmtstr = '<'
        else:
            m.fmtstr = '>'
        m.native_fmtstr = m.fmtstr
        for f in m.ordered_fields:
            m.fmtstr += mavfmt(f)
            m.native_fmtstr += native_mavfmt(f)
        # order_map maps declared field order to wire (ordered) layout;
        # len_map / array_len_map record per-field element counts
        m.order_map = [ 0 ] * len(m.fieldnames)
        m.len_map = [ 0 ] * len(m.fieldnames)
        m.array_len_map = [ 0 ] * len(m.fieldnames)
        for i in range(0, len(m.fieldnames)):
            m.order_map[i] = m.ordered_fieldnames.index(m.fieldnames[i])
            m.array_len_map[i] = m.ordered_fields[i].array_length
        for i in range(0, len(m.fieldnames)):
            n = m.order_map[i]
            m.len_map[n] = m.fieldlengths[i]

    print("Generating %s" % filename)
    # context manager guarantees the output file is closed even if one of
    # the generation steps raises (the original leaked the handle on error)
    with open(filename, "w") as outf:
        generate_preamble(outf, msgs, basename, filelist, xml[0])
        generate_enums(outf, enums)
        generate_message_ids(outf, msgs)
        generate_classes(outf, msgs)
        generate_mavlink_class(outf, msgs, xml[0])
        generate_methods(outf, msgs)
    print("Generated %s OK" % filename)
| lgpl-3.0 |
shacker/django | tests/expressions_case/models.py | 55 | 2542 | from django.db import models
try:
from PIL import Image
except ImportError:
Image = None
class CaseTestModel(models.Model):
    """Model exposing one field of (almost) every built-in Django field
    type, used to exercise Case/When expressions against each column
    type."""
    integer = models.IntegerField()
    integer2 = models.IntegerField(null=True)
    string = models.CharField(max_length=100, default='')
    big_integer = models.BigIntegerField(null=True)
    binary = models.BinaryField(default=b'')
    boolean = models.BooleanField(default=False)
    # db_column overrides avoid clashes with reserved words in some backends
    date = models.DateField(null=True, db_column='date_field')
    date_time = models.DateTimeField(null=True)
    decimal = models.DecimalField(max_digits=2, decimal_places=1, null=True, db_column='decimal_field')
    duration = models.DurationField(null=True)
    email = models.EmailField(default='')
    file = models.FileField(null=True, db_column='file_field')
    file_path = models.FilePathField(null=True)
    float = models.FloatField(null=True, db_column='float_field')
    # ImageField requires Pillow; only declared when it is installed
    if Image:
        image = models.ImageField(null=True)
    generic_ip_address = models.GenericIPAddressField(null=True)
    null_boolean = models.NullBooleanField()
    positive_integer = models.PositiveIntegerField(null=True)
    positive_small_integer = models.PositiveSmallIntegerField(null=True)
    slug = models.SlugField(default='')
    small_integer = models.SmallIntegerField(null=True)
    text = models.TextField(default='')
    time = models.TimeField(null=True, db_column='time_field')
    url = models.URLField(default='')
    uuid = models.UUIDField(null=True)
    # self-referential FK for testing lookups that span a relation
    fk = models.ForeignKey('self', models.CASCADE, null=True)
    def __str__(self):
        return "%i, %s" % (self.integer, self.string)
class O2OCaseTestModel(models.Model):
    """One-to-one companion of CaseTestModel (reverse accessor 'o2o_rel')."""
    o2o = models.OneToOneField(CaseTestModel, models.CASCADE, related_name='o2o_rel')
    integer = models.IntegerField()
    def __str__(self):
        return "%i, %s" % (self.id, self.o2o)
class FKCaseTestModel(models.Model):
    """Many-to-one companion of CaseTestModel (reverse accessor 'fk_rel')."""
    fk = models.ForeignKey(CaseTestModel, models.CASCADE, related_name='fk_rel')
    integer = models.IntegerField()
    def __str__(self):
        return "%i, %s" % (self.id, self.fk)
class Client(models.Model):
    """Client with a tiered account type, used in the Case-expression
    documentation examples."""
    REGULAR = 'R'
    GOLD = 'G'
    PLATINUM = 'P'
    ACCOUNT_TYPE_CHOICES = (
        (REGULAR, 'Regular'),
        (GOLD, 'Gold'),
        (PLATINUM, 'Platinum'),
    )
    name = models.CharField(max_length=50)
    registered_on = models.DateField()
    account_type = models.CharField(
        max_length=1,
        choices=ACCOUNT_TYPE_CHOICES,
        default=REGULAR,
    )
    def __str__(self):
        return self.name
| bsd-3-clause |
RussTedrake/director | src/python/ddapp/valvedemo.py | 2 | 39678 | import math
import functools
import numpy as np
from ddapp import transformUtils
from ddapp import objectmodel as om
from ddapp import visualization as vis
from ddapp import applogic as app
from ddapp import ik
from ddapp.ikparameters import IkParameters
from ddapp import lcmUtils
from ddapp import robotstate
from ddapp import segmentation
from ddapp.tasks.taskuserpanel import TaskUserPanel
from ddapp.tasks.taskuserpanel import ImageBasedAffordanceFit
from ddapp.uuidutil import newUUID
import ddapp.tasks.robottasks as rt
from PythonQt import QtCore
class ValvePlannerDemo(object):
    def __init__(self, robotModel, footstepPlanner, footstepsPanel, manipPlanner, ikPlanner,
                 lhandDriver, rhandDriver, sensorJointController):
        '''Store planner/driver handles and initialize the tunable
        parameters of the valve-turning behavior.  Defaults assume a large
        valve grasped with the left hand.

        NOTE(review): multisenseDriver, atlasDriver, useLidar and
        scribeDirection are read by methods below but not assigned here -
        presumably set externally after construction; verify at call sites.
        '''
        self.robotModel = robotModel
        self.footstepPlanner = footstepPlanner
        self.footstepsPanel = footstepsPanel
        self.manipPlanner = manipPlanner
        self.ikPlanner = ikPlanner
        self.lhandDriver = lhandDriver
        self.rhandDriver = rhandDriver
        self.sensorJointController = sensorJointController
        self.graspingObject = 'valve'
        self.setGraspingHand('left')
        self.valveAffordance = None
        self.graspFrame = None
        self.stanceFrame = None
        # live operation flags
        self.planFromCurrentRobotState = False
        self.plans = []
        # IK server speed:
        self.speedLow = 10
        self.speedHigh = 60
        self.speedTurn = 100
        self.maxHandTranslationSpeed = 0.3
        # reach to center and back - for palm point
        self.graspFrameXYZ = [0.0, 0.0, -0.1]
        self.graspFrameRPY = [90, 0, 180]
        self.nominalPelvisXYZ = None
        self.useLargeValveDefaults()
        # hand-on-valve-axis tolerances used by the insertion constraints
        self.coaxialTol = 0.001
        self.coaxialGazeTol = 2
        # gravity-compensation torque limits for the arm (Nm)
        self.shxMaxTorque = 40
        self.elxMaxTorque = 10
        self.elxLowerBoundDegrees = 30
        self.reachPoseName = None
        self.touchPose = None
        self.quasiStaticShrinkFactor = 0.5
        self.lockBack = True
        self.lockBase = True
        self.nominalPoseName = 'q_valve_nom'
        self.startPoseName = 'q_valve_start'
        self.setupStance()
    def setGraspingHand(self, side):
        '''Select 'left' or 'right' as the grasping hand and recompute the
        mirrored stance offsets.'''
        self.graspingHand = side
        self.setupStance()
    def useLargeValveDefaults(self):
        '''Set reach/touch depths and hand open/close amounts for a large
        valve (distances in meters along the valve axis).'''
        # distance above the valve axis for the hand center
        self.reachHeight = 0.0
        # distance away from valve for palm face on approach reach
        self.reachDepth = -0.1
        # distance away from valve for palm face on retraction
        self.retractDepth = -0.05
        # distance away from valve for palm face on approach reach
        self.touchDepth = 0.05
        self.openAmount = 20
        self.closedAmount = 20
        self.smallValve = False
    def useSmallValveDefaults(self):
        '''Set reach/touch depths and hand open/close amounts for a small
        valve (tighter depths, fuller hand closure).'''
        # distance above the valve axis for the hand center
        self.reachHeight = 0.0
        # distance away from valve for palm face on approach reach
        self.reachDepth = -0.05
        # distance away from valve for palm face on retraction
        self.retractDepth = -0.05
        # distance away from valve for palm face on approach reach
        self.touchDepth = 0.01
        self.openAmount = 0
        self.closedAmount = 50
        self.smallValve = True
def setupStance(self):
self.relativeStanceXYZInitial = [-0.9, -0.3, 0.0]
self.relativeStanceRPYInitial = [0, 0, 0]
self.relativeStanceXYZ = self.relativeStanceXYZInitial
self.relativeStanceRPY = self.relativeStanceRPYInitial
# mirror stance and rotation direction for right hand:
if self.graspingHand == 'right':
self.relativeStanceXYZ[1] = -self.relativeStanceXYZ[1]
self.relativeStanceRPY[2] = -self.relativeStanceRPY[2]
    def addPlan(self, plan):
        '''Append a computed manipulation plan to the session history.'''
        self.plans.append(plan)
    def computeGroundFrame(self, robotModel):
        '''
        Given a robot model, returns a vtkTransform at a position between
        the feet, on the ground, with z-axis up and x-axis aligned with the
        robot pelvis x-axis.
        '''
        t1 = robotModel.getLinkFrame( self.ikPlanner.leftFootLink )
        t2 = robotModel.getLinkFrame( self.ikPlanner.rightFootLink )
        pelvisT = robotModel.getLinkFrame( self.ikPlanner.pelvisLink )
        # project the pelvis x-axis into world, then re-orthogonalize
        # against world-z so the resulting frame is upright
        xaxis = [1.0, 0.0, 0.0]
        pelvisT.TransformVector(xaxis, xaxis)
        xaxis = np.array(xaxis)
        zaxis = np.array([0.0, 0.0, 1.0])
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)
        xaxis = np.cross(yaxis, zaxis)
        stancePosition = (np.array(t2.GetPosition()) + np.array(t1.GetPosition())) / 2.0
        # vertical offset from the foot link frame down to the ground -
        # presumably the foot sole height; TODO confirm against the model
        footHeight = 0.0811
        t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
        t.PostMultiply()
        t.Translate(stancePosition)
        t.Translate([0.0, 0.0, -footHeight])
        return t
    def computeRobotStanceFrame(self, objectTransform, relativeStanceTransform):
        '''
        Given a robot model, determine the height of the ground using an XY and
        Yaw standoff, combined to determine the relative 6DOF standoff For a
        grasp or approach stance
        '''
        groundFrame = self.footstepPlanner.getFeetMidPoint(self.robotModel)
        groundHeight = groundFrame.GetPosition()[2]
        graspPosition = np.array(objectTransform.GetPosition())
        # use the object's y-axis, projected and re-orthogonalized against
        # world-z, as the stance frame's x-axis (upright frame facing the
        # object)
        graspYAxis = [0.0, 1.0, 0.0]
        graspZAxis = [0.0, 0.0, 1.0]
        objectTransform.TransformVector(graspYAxis, graspYAxis)
        objectTransform.TransformVector(graspZAxis, graspZAxis)
        xaxis = graspYAxis
        zaxis = [0, 0, 1]
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)
        xaxis = np.cross(yaxis, zaxis)
        graspGroundTransform = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
        graspGroundTransform.PostMultiply()
        # place the frame under the object, at ground height
        graspGroundTransform.Translate(graspPosition[0], graspPosition[1], groundHeight)
        # apply the configured relative standoff to get the stance goal
        robotStance = transformUtils.copyFrame(relativeStanceTransform)
        robotStance.Concatenate(graspGroundTransform)
        return robotStance
    def updatePointcloudSnapshot(self):
        '''Refresh the 'pointcloud snapshot' object from lidar or stereo
        depth, depending on self.useLidar (set externally, not in
        __init__ - TODO confirm where).'''
        if (self.useLidar is True):
            return vis.updatePolyData(segmentation.getCurrentRevolutionData(),
                                      'pointcloud snapshot', parent='segmentation')
        else:
            return vis.updatePolyData(segmentation.getDisparityPointCloud(4),
                                      'pointcloud snapshot', parent='segmentation')
# Valve Focused Functions ##################################################
    def onImageViewDoubleClick(self, displayPoint, modifiers, imageView):
        '''Ctrl+double-click handler: cast a ray through the clicked image
        pixel, segment a valve around the picked point in the current
        pointcloud, and refresh the affordance.'''
        if modifiers != QtCore.Qt.ControlModifier:
            return
        imagePixel = imageView.getImagePixel(displayPoint)
        cameraPos, ray = imageView.getWorldPositionAndRay(imagePixel)
        polyData = self.updatePointcloudSnapshot().polyData
        pickPoint = segmentation.extractPointsAlongClickRay(cameraPos, ray,
                                                            polyData)
        # replace any previous valve affordance before re-segmenting
        om.removeFromObjectModel(om.findObjectByName('valve'))
        segmentation.segmentValveByBoundingBox(polyData, pickPoint)
        self.findAffordance()
    def getValveAffordance(self):
        '''Return the 'valve' affordance object, or None if not present.'''
        return om.findObjectByName('valve')
    def computeStanceFrame(self, useIkTraj=False):
        '''Compute and visualize the stance (feet) frame for grasping the
        valve.  With useIkTraj the stance comes from the first pose of a
        seeded insertion trajectory; otherwise from the fixed relative
        standoff.'''
        objectTransform = transformUtils.copyFrame(self.computeGraspFrame().transform)
        if useIkTraj:
            startPose = self.getNominalPose()
            plan = self.planInsertTraj(self.speedLow, lockFeet=False, lockBase=False,
                                       resetPoses=True, startPose=startPose)
            # take the feet midpoint of the trajectory's first posture
            stancePose = robotstate.convertStateMessageToDrakePose(plan.plan[0])
            stanceRobotModel = self.ikPlanner.getRobotModelAtPose(stancePose)
            self.nominalPelvisXYZ = stancePose[:3]
            robotStance = self.footstepPlanner.getFeetMidPoint(stanceRobotModel)
        else:
            robotStance = self.computeRobotStanceFrame(objectTransform,
                                                       self.computeRelativeStanceTransform())
        stanceFrame = vis.updateFrame(robotStance, 'valve grasp stance',
                                      parent=self.getValveAffordance(), visible=False, scale=0.2)
        stanceFrame.addToView(app.getDRCView())
        return stanceFrame
    def computeRelativeStanceTransform(self):
        '''Return the configured stance standoff (XYZ/RPY) as a transform.'''
        return transformUtils.copyFrame(
            transformUtils.frameFromPositionAndRPY(self.relativeStanceXYZ, self.relativeStanceRPY))
    def computeRelativeGraspTransform(self):
        '''Return the palm-to-valve grasp offset: the configured XYZ/RPY
        followed by fixed X/Y rotations that orient the palm toward the
        valve face.'''
        t = transformUtils.copyFrame(transformUtils.frameFromPositionAndRPY(self.graspFrameXYZ,
                                                                            self.graspFrameRPY))
        t.PostMultiply()
        t.RotateX(180)
        t.RotateY(-90)
        return t
    def computeGraspFrame(self):
        '''Compose the relative grasp offset with the valve affordance's
        frame, publish it as 'valve grasp frame', and return it.'''
        t = self.computeRelativeGraspTransform()
        t.Concatenate(self.getValveAffordance().getChildFrame().transform)
        graspFrame = vis.updateFrame(t, 'valve grasp frame',
                                     parent=self.getValveAffordance(),
                                     visible=False, scale=0.2)
        graspFrame.addToView(app.getDRCView())
        return graspFrame
    def spawnValveAffordance(self):
        '''Spawn a synthetic valve affordance placed so that the robot's
        current feet midpoint coincides with the nominal grasp stance for
        it (useful for testing without perception).'''
        radius = 0.10
        tubeRadius = 0.02
        position = [0, 0, 1.2]
        rpy = [0, 0, 0]
        t_feet_mid = self.footstepPlanner.getFeetMidPoint(self.robotModel)
        t = transformUtils.frameFromPositionAndRPY(position, rpy)
        t_grasp = self.computeRelativeGraspTransform()
        t_grasp.Concatenate(t)
        t_stance = self.computeRobotStanceFrame(t_grasp, self.computeRelativeStanceTransform())
        # invert the stance so the valve lands relative to the current feet
        t_valve = t_stance.GetInverse()
        # This is necessary to get the inversion to actually happen. We don't know why.
        t_valve.GetMatrix()
        t_valve.Concatenate(t)
        t_valve.Concatenate(t_feet_mid)
        pose = transformUtils.poseFromTransform(t_valve)
        desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose,
                    Color=[0, 1, 0], Radius=float(radius), Segments=20)
        desc['Tube Radius'] = tubeRadius
        # imported here, presumably to avoid a circular import at module
        # load - TODO confirm
        import affordancepanel
        obj = affordancepanel.panel.affordanceFromDescription(desc)
        obj.params = dict(radius=radius)
# End Valve Focused Functions ##############################################
# Planning Functions #######################################################
# These are operational conveniences:
    def planFootstepsToStance(self, **kwargs):
        '''Send the computed valve stance frame to the footsteps panel as a
        walking goal; kwargs are forwarded to computeStanceFrame.'''
        f = transformUtils.copyFrame(self.computeStanceFrame(**kwargs).transform)
        self.footstepsPanel.onNewWalkingGoal(f)
    def planPreGrasp(self):
        '''Plan a posture move to the stored 'arm up pregrasp' posture for
        the grasping hand.'''
        startPose = self.getPlanningStartPose()
        endPose = self.ikPlanner.getMergedPostureFromDatabase(
            startPose, 'General', 'arm up pregrasp', side=self.graspingHand)
        newPlan = self.ikPlanner.computePostureGoal(startPose, endPose)
        self.addPlan(newPlan)
    def planNominal(self):
        '''Plan a return to the 'safe nominal' standing posture.'''
        startPose = self.getPlanningStartPose()
        endPose, info = self.ikPlanner.computeStandPose(startPose)
        endPose = self.ikPlanner.getMergedPostureFromDatabase(endPose, 'General', 'safe nominal')
        newPlan = self.ikPlanner.computePostureGoal(startPose, endPose)
        self.addPlan(newPlan)
def createStaticTorqueConstraint(self):
if self.graspingHand == 'left':
elxJoint = 'l_arm_elx'
shxJoint = 'l_arm_shx'
else:
elxJoint = 'r_arm_elx'
shxJoint = 'r_arm_shx'
constraint = ik.GravityCompensationTorqueConstraint()
constraint.joints = [shxJoint, elxJoint]
constraint.torquesLowerBound = -np.array([self.shxMaxTorque, self.elxMaxTorque])
constraint.torquesUpperBound = np.array([self.shxMaxTorque, self.elxMaxTorque])
return constraint
def createElbowPostureConstraint(self):
if self.graspingHand == 'left':
elxJoint = 'l_arm_elx'
elxLowerBound = np.radians(self.elxLowerBoundDegrees)
elxUpperBound = 2.5
else:
elxJoint = 'r_arm_elx'
elxLowerBound = -2.5
elxUpperBound = np.radians(-self.elxLowerBoundDegrees)
constraint = ik.PostureConstraint()
constraint.joints = [elxJoint]
constraint.jointsLowerBound = [elxLowerBound]
constraint.jointsUpperBound = [elxUpperBound]
return constraint
def createWristAngleConstraint(self, wristAngleCW, planFromCurrentRobotState):
if self.graspingHand == 'left':
wristJoint = ['l_arm_lwy']
wristJointLowerBound = [-np.radians(160) + wristAngleCW]
wristJointUpperBound = [-np.radians(160) + wristAngleCW]
else:
wristJoint = ['r_arm_lwy']
wristJointLowerBound = [np.radians(160) - wristAngleCW]
wristJointUpperBound = [np.radians(160) - wristAngleCW]
constraint = ik.PostureConstraint()
constraint.joints = wristJoint
constraint.jointsLowerBound = wristJointLowerBound
constraint.jointsUpperBound = wristJointUpperBound
if planFromCurrentRobotState:
constraint.tspan = [1.0, 1.0]
return constraint
    def createHandGazeConstraint(self):
        '''Constrain the palm to point at the valve grasp frame within the
        configured gaze cone, over the whole trajectory.'''
        constraint = self.ikPlanner.createGazeGraspConstraint(
            self.graspingHand, self.computeGraspFrame(), coneThresholdDegrees=self.coaxialGazeTol)
        constraint.tspan = [0.0, 1.0]
        return constraint
def createHandFixedOrientConstraint(self):
if self.graspingHand == 'left':
handLink = 'l_hand'
else:
handLink = 'r_hand'
return ik.WorldFixedOrientConstraint(linkName=handLink)
def createBackPostureConstraint(self):
if self.lockBack:
return self.ikPlanner.createLockedBackPostureConstraint(self.startPoseName)
else:
return self.ikPlanner.createMovingBackLimitedPostureConstraint()
    def createFootConstraints(self, lockFeet):
        '''Return foot constraints: fully fixed feet when lockFeet, else
        sliding feet kept at a fixed relative placement.'''
        if lockFeet:
            return self.ikPlanner.createFixedFootConstraints(self.startPoseName)
        else:
            constraints = []
            constraints.extend(self.ikPlanner.createSlidingFootConstraints(self.startPoseName))
            constraints.append(ik.WorldFixedBodyPoseConstraint(linkName='l_foot'))
            constraints.append(ik.WorldFixedBodyPoseConstraint(linkName='r_foot'))
            # pin the left foot 0.3 m to the side of the right foot in the
            # right foot's frame (x and y deviations bounded to zero,
            # z free)
            p = ik.RelativePositionConstraint()
            p.bodyNameA = self.ikPlanner.leftFootLink
            p.bodyNameB = self.ikPlanner.rightFootLink
            p.positionTarget = np.array([0, 0.3, 0])
            p.lowerBound = np.array([0, 0, -np.inf])
            p.upperBound = np.array([0, 0, np.inf])
            constraints.append(p)
            # and keep the right foot fore-aft aligned with the left foot
            # (x deviation bounded to zero, y and z free)
            p = ik.RelativePositionConstraint()
            p.bodyNameA = self.ikPlanner.rightFootLink
            p.bodyNameB = self.ikPlanner.leftFootLink
            p.lowerBound = np.array([0, -np.inf, -np.inf])
            p.upperBound = np.array([0, np.inf, np.inf])
            constraints.append(p)
            return constraints
    def createHeadGazeConstraint(self):
        '''Keep the head pointed at the valve center within a 20 degree
        cone.'''
        valveCenter = np.array(self.computeGraspFrame().transform.GetPosition())
        return ik.WorldGazeTargetConstraint(linkName='head', bodyPoint=np.zeros(3),
                                            worldPoint=valveCenter, coneThreshold=np.radians(20))
    def createBaseConstraints(self, resetBase, lockBase, lockFeet, yawDesired):
        '''Return pelvis/base constraints.  With the feet locked the base
        is either locked or limited to vertical (XYZ) motion; with sliding
        feet the base may also yaw toward yawDesired (within 20 degrees)
        and a head gaze constraint is added.'''
        constraints = []
        if lockBase is None:
            lockBase = self.lockBase
        # resetBase seeds the base from the nominal pose instead of the
        # current start pose
        if resetBase:
            poseName = self.nominalPoseName
        else:
            poseName = self.startPoseName
        if lockFeet:
            if lockBase:
                constraints.append(
                    self.ikPlanner.createLockedBasePostureConstraint(poseName, lockLegs=False))
            else:
                constraints.append(
                    self.ikPlanner.createXYZMovingBasePostureConstraint(poseName))
                constraints.append(ik.WorldFixedBodyPoseConstraint(linkName='pelvis'))
        else:
            constraints.append(self.ikPlanner.createXYZYawMovingBasePostureConstraint(poseName))
            constraints.append(ik.WorldFixedBodyPoseConstraint(linkName='pelvis'))
            constraints.append(self.createHeadGazeConstraint())
            p = ik.PostureConstraint()
            p.joints = ['base_yaw']
            p.jointsLowerBound = [yawDesired - np.radians(20)]
            p.jointsUpperBound = [yawDesired + np.radians(20)]
            constraints.append(p)
        return constraints
def getStartPoseName(self, planFromCurrentRobotState, retract, usePoses):
if planFromCurrentRobotState:
poseName = self.startPoseName
else:
if not usePoses or self.reachPoseName is None:
poseName = self.nominalPoseName
else:
if retract:
poseName = self.touchPoseName
else:
poseName = self.reachPoseName
return poseName
def getEndPoseName(self, retract, usePoses):
if not usePoses or self.touchPose is None:
return self.nominalPoseName
else:
if retract:
return self.reachPoseName
else:
return self.touchPoseName
    def createHandPositionConstraint(self, radialTol, axialLowerBound, axialUpperBound, tspan):
        '''Constrain the palm point relative to the valve grasp frame: the
        y-axis of that frame is treated as the valve axis (axial bounds),
        x and z are bounded by radialTol.'''
        linkOffsetFrame = self.ikPlanner.getPalmToHandLink(self.graspingHand)
        constraint = ik.PositionConstraint()
        constraint.linkName = self.ikPlanner.getHandLink(self.graspingHand)
        constraint.pointInLink = np.array(linkOffsetFrame.GetPosition())
        constraint.referenceFrame = self.computeGraspFrame().transform
        constraint.lowerBound = np.array([-radialTol, axialLowerBound, -radialTol])
        constraint.upperBound = np.array([radialTol, axialUpperBound, radialTol])
        constraint.tspan = tspan
        return constraint
    def createAllHandPositionConstraints(self, radialTol, retract):
        '''Build the three palm-position constraints for an insert or
        retract trajectory: on-axis for the whole plan plus exact initial
        and final depths (swapped when retracting).'''
        constraints = []
        # Constrain hand to lie on the valve axis between the reach and touch
        # depths for the entire plan
        constraints.append(self.createHandPositionConstraint(radialTol, self.reachDepth,
                                                             self.touchDepth, [0.0, 1.0]))
        # Choose initial and final depths
        if retract:
            initialDepth = self.touchDepth
            finalDepth = self.reachDepth
        else:
            initialDepth = self.reachDepth
            finalDepth = self.touchDepth
        # Constrain initial position of the hand along the valve axis
        constraints.append(self.createHandPositionConstraint(np.inf, initialDepth, initialDepth,
                                                             [0.0, 0.0]))
        # Constrain final position of the hand along the valve axis
        constraints.append(self.createHandPositionConstraint(np.inf, finalDepth, finalDepth,
                                                             [1.0, 1.0]))
        return constraints
    def setReachAndTouchPoses(self, plan=None):
        '''Cache the first and last postures of an insertion plan as the
        reach and touch poses (registered with the IK planner); with no
        plan, clear the cache.'''
        if plan is None:
            self.reachPoseName = None
            self.touchPoseName = None
            self.reachPose = None
            self.touchPose = None
        else:
            self.reachPoseName = 'q_reach'
            self.touchPoseName = 'q_touch'
            self.reachPose = robotstate.convertStateMessageToDrakePose(plan.plan[0])
            self.touchPose = robotstate.convertStateMessageToDrakePose(plan.plan[-1])
            self.ikPlanner.addPose(self.reachPose, self.reachPoseName)
            self.ikPlanner.addPose(self.touchPose, self.touchPoseName)
    def planInsertTraj(self, speed, lockFeet=True, lockBase=None, resetBase=False, wristAngleCW=0,
                       startPose=None, verticalOffset=0.01, usePoses=False, resetPoses=True,
                       planFromCurrentRobotState=False, retract=False):
        '''Core insert/retract trajectory planner: assemble the full
        constraint set (base, back, feet, arm, elbow, torque, gaze, wrist
        and on-axis hand position) and run the IK trajectory solver.
        Returns the plan; on a good insert (info <= 10) the reach/touch
        poses are cached for later re-use.
        NOTE(review): verticalOffset is accepted but unused here - verify.
        '''
        ikParameters = IkParameters(usePointwise=False, maxDegreesPerSecond=speed,
                                    numberOfAddedKnots=1,
                                    quasiStaticShrinkFactor=self.quasiStaticShrinkFactor,
                                    fixInitialState=planFromCurrentRobotState)
        # desired base yaw faces the valve axis (grasp frame y-axis)
        _, yaxis, _ = transformUtils.getAxesFromTransform(self.computeGraspFrame().transform)
        yawDesired = np.arctan2(yaxis[1], yaxis[0])
        if startPose is None:
            startPose = self.getPlanningStartPose()
        nominalPose = self.getNominalPose()
        self.ikPlanner.addPose(nominalPose, self.nominalPoseName)
        self.ikPlanner.addPose(startPose, self.startPoseName)
        self.ikPlanner.reachingSide = self.graspingHand
        constraints = []
        constraints.extend(self.createBaseConstraints(resetBase, lockBase, lockFeet, yawDesired))
        constraints.append(self.createBackPostureConstraint())
        constraints.append(self.ikPlanner.createQuasiStaticConstraint())
        constraints.extend(self.createFootConstraints(lockFeet))
        constraints.append(self.ikPlanner.createLockedArmPostureConstraint(self.startPoseName))
        constraints.append(self.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraints.append(self.createElbowPostureConstraint())
        constraints.append(self.createStaticTorqueConstraint())
        constraints.append(self.createHandGazeConstraint())
        constraints.append(self.createHandFixedOrientConstraint())
        constraints.append(self.createWristAngleConstraint(wristAngleCW,
                                                           planFromCurrentRobotState))
        constraints.extend(self.createAllHandPositionConstraints(self.coaxialTol, retract))
        if retract:
            # retract ends at the reach pose, but keeps the current wrist
            # yaw so the hand does not spin while withdrawing
            startPoseName = self.getStartPoseName(planFromCurrentRobotState, True, usePoses)
            endPoseName = self.getEndPoseName(True, usePoses)
            endPose = self.ikPlanner.jointController.getPose(endPoseName)
            endPose = self.ikPlanner.mergePostures(endPose, robotstate.matchJoints('lwy'), startPose)
            endPoseName = 'q_retract'
            self.ikPlanner.addPose(endPose, endPoseName)
        else:
            startPoseName = self.getStartPoseName(planFromCurrentRobotState, retract, usePoses)
            endPoseName = self.getEndPoseName(retract, usePoses)
        plan = self.ikPlanner.runIkTraj(constraints, startPoseName, endPoseName, self.nominalPoseName, ikParameters=ikParameters)
        # plan_info <= 10 indicates SNOPT success; cache the endpoint poses
        if resetPoses and not retract and max(plan.plan_info) <= 10:
            self.setReachAndTouchPoses(plan)
        return plan
    def planReach(self, verticalOffset=None, **kwargs):
        '''Plan a posture move from the current pose to the reach pose (the
        first posture of a fresh insert trajectory), with the palm speed
        capped, and publish it as a candidate plan.'''
        startPose = self.getPlanningStartPose()
        insert_plan = self.planInsertTraj(self.speedHigh, lockFeet=True, usePoses=True,
                                          resetPoses=True, **kwargs)
        info = max(insert_plan.plan_info)
        reachPose = robotstate.convertStateMessageToDrakePose(insert_plan.plan[0])
        ikParameters = IkParameters(maxDegreesPerSecond=2*self.speedTurn,
                                    rescaleBodyNames=[self.ikPlanner.getHandLink(side=self.graspingHand)],
                                    rescaleBodyPts=list(self.ikPlanner.getPalmPoint(side=self.graspingHand)),
                                    maxBodyTranslationSpeed=self.maxHandTranslationSpeed)
        plan = self.ikPlanner.computePostureGoal(startPose, reachPose, ikParameters=ikParameters)
        # propagate the insert plan's solver info to the posture plan
        plan.plan_info = [info]*len(plan.plan_info)
        lcmUtils.publish('CANDIDATE_MANIP_PLAN', plan)
        self.addPlan(plan)
    def planTouch(self, **kwargs):
        '''Plan the slow final approach from the current pose onto the
        valve, with base and feet locked.'''
        plan = self.planInsertTraj(self.speedLow, lockBase=True, lockFeet=True, usePoses=True,
                                   resetPoses=False, planFromCurrentRobotState=True, **kwargs)
        self.addPlan(plan)
    def planTurn(self, wristAngleCW=np.radians(320)):
        '''Plan a wrist-only valve turn to the given clockwise angle,
        clamped just inside the lwy joint range (-160 to +320 degrees of
        commanded turn).'''
        ikParameters = IkParameters(maxDegreesPerSecond=self.speedTurn)
        startPose = self.getPlanningStartPose()
        wristAngleCW = min(np.radians(320)-0.01, max(-np.radians(160)+0.01, wristAngleCW))
        # lwy sign convention is mirrored between arms
        if self.graspingHand == 'left':
            postureJoints = {'l_arm_lwy': -np.radians(160) + wristAngleCW}
        else:
            postureJoints = {'r_arm_lwy': np.radians(160) - wristAngleCW}
        endPose = self.ikPlanner.mergePostures(startPose, postureJoints)
        plan = self.ikPlanner.computePostureGoal(startPose, endPose, ikParameters=ikParameters)
        app.displaySnoptInfo(1)
        self.addPlan(plan)
    def planRetract(self, **kwargs):
        '''Plan a withdrawal from the valve back to the reach depth,
        holding the wrist at its current turn angle.'''
        startPose = self.getPlanningStartPose()
        # recover the commanded clockwise turn angle from the current lwy
        # joint position (mirrored sign convention per arm)
        if self.graspingHand == 'left':
            jointId = robotstate.getDrakePoseJointNames().index('l_arm_lwy')
            wristAngleCW = np.radians(160) + startPose[jointId]
        else:
            jointId = robotstate.getDrakePoseJointNames().index('r_arm_lwy')
            wristAngleCW = np.radians(160) - startPose[jointId]
        plan = self.planInsertTraj(self.speedLow, retract=True, lockBase=True, lockFeet=True,
                                   usePoses=True, planFromCurrentRobotState=True, resetPoses=False,
                                   wristAngleCW=wristAngleCW, **kwargs)
        self.addPlan(plan)
def getNominalPose(self):
    """Return a nominal whole-body pose seeded near the valve.

    Starts from q_nom, translates the floating base one unit (presumably
    meters -- confirm) back from the grasp frame along its y axis, yaws the
    base to face the valve, then merges in a stored reach-nominal posture
    matching the scribe direction.
    """
    axes = transformUtils.getAxesFromTransform(self.computeGraspFrame().transform)
    yaxis = axes[1]
    yawDesired = np.arctan2(yaxis[1], yaxis[0])
    seedDistance = 1
    nominalPose = self.ikPlanner.jointController.getPose('q_nom')
    # Pose indices 0, 1, 5 are the floating-base x, y and yaw.
    nominalPose[0] = (self.computeGraspFrame().transform.GetPosition()[0] -
                      seedDistance*yaxis[0])
    nominalPose[1] = (self.computeGraspFrame().transform.GetPosition()[1] -
                      seedDistance*yaxis[1])
    nominalPose[5] = yawDesired
    if self.scribeDirection == 1:  # Clockwise
        nominalPose = self.ikPlanner.getMergedPostureFromDatabase(nominalPose, 'valve', 'reach-nominal-cw', side=self.graspingHand)
    else:  # Counter-clockwise
        nominalPose = self.ikPlanner.getMergedPostureFromDatabase(nominalPose, 'valve', 'reach-nominal-ccw', side=self.graspingHand)
    return nominalPose
# Glue Functions ###########################################################
def moveRobotToStanceFrame(self, frame):
    """Teleport the robot model to the given stance frame.

    NOTE(review): writes straight to the sensor joint controller, so this is
    presumably for simulation/offline planning only -- confirm.
    """
    # Start from the nominal posture, then overwrite the floating-base x, y and yaw.
    self.sensorJointController.setPose('q_nom')
    position = frame.GetPosition()
    rollPitchYawDegrees = frame.GetOrientation()
    q = self.sensorJointController.q.copy()
    q[0] = position[0]
    q[1] = position[1]
    q[5] = math.radians(rollPitchYawDegrees[2])
    self.sensorJointController.setPose('EST_ROBOT_STATE', q)
def getHandDriver(self, side):
    """Return the hand driver for the given side ('left' or 'right')."""
    assert side in ('left', 'right')
    if side == 'left':
        return self.lhandDriver
    return self.rhandDriver
def openHand(self, side):
    """Fully open the hand on the given side."""
    handDriver = self.getHandDriver(side)
    handDriver.sendCustom(0.0, 100.0, 100.0, 0)
def openPinch(self, side):
    """Open the hand on the given side into a pinch preshape (mode 1 -- presumably
    the driver's pinch mode, confirm against the hand driver)."""
    handDriver = self.getHandDriver(side)
    handDriver.sendCustom(20.0, 100.0, 100.0, 1)
def closeHand(self, side):
    """Fully close the hand on the given side."""
    handDriver = self.getHandDriver(side)
    handDriver.sendCustom(100.0, 100.0, 100.0, 0)
def sendNeckPitchLookDown(self):
    """Pitch the neck down (40 -- presumably degrees) to view the workspace."""
    lookDownPitch = 40
    self.multisenseDriver.setNeckPitch(lookDownPitch)
def sendNeckPitchLookForward(self):
    """Pitch the neck to a shallow angle (15 -- presumably degrees) for looking ahead."""
    lookForwardPitch = 15
    self.multisenseDriver.setNeckPitch(lookForwardPitch)
def waitForAtlasBehaviorAsync(self, behaviorName):
    """Generator task that yields until the Atlas driver reports behaviorName.

    Each yield returns control to the task scheduler, so the UI stays
    responsive while polling.
    """
    assert behaviorName in self.atlasDriver.getBehaviorMap().values()
    while self.atlasDriver.getCurrentBehaviorName() != behaviorName:
        yield
def printAsync(self, s):
    """Generator task that prints s on its first resume (Python 2 print statement)."""
    yield
    print s
def optionalUserPrompt(self, message):
    """Generator task asking for confirmation, skipped when optional prompts are off.

    Raises (aborting the task queue) unless the user answers exactly 'y'.
    """
    if not self.optionalUserPromptEnabled:
        return
    yield
    result = raw_input(message)
    if result != 'y':
        raise Exception('user abort.')
def requiredUserPrompt(self, message):
    """Generator task asking for confirmation, skipped when required prompts are off.

    Same contract as optionalUserPrompt but gated on its own enable flag;
    raises (aborting the task queue) unless the user answers exactly 'y'.
    """
    if not self.requiredUserPromptEnabled:
        return
    yield
    result = raw_input(message)
    if result != 'y':
        raise Exception('user abort.')
def delay(self, delayTimeInSeconds):
    """Generator task that yields repeatedly until the given time has elapsed."""
    yield
    t = SimpleTimer()
    while t.elapsed() < delayTimeInSeconds:
        yield
def waitForCleanLidarSweepAsync(self):
    """Generator task that waits for two more lidar revolutions, guaranteeing at
    least one complete sweep captured entirely after this call started."""
    currentRevolution = self.multisenseDriver.displayedRevolution
    desiredRevolution = currentRevolution + 2
    while self.multisenseDriver.displayedRevolution < desiredRevolution:
        yield
def getEstimatedRobotStatePose(self):
    """Return the live estimated robot state pose from the sensor joint controller."""
    return self.sensorJointController.getPose('EST_ROBOT_STATE')
def getPlanningStartPose(self):
    """Return the pose planning should start from.

    Uses the live estimated state when planning from the current robot state
    (or when no previous plan exists); otherwise continues from the final
    state of the most recently added plan.
    """
    if self.planFromCurrentRobotState or not self.plans:
        return self.getEstimatedRobotStatePose()
    lastPlan = self.plans[-1]
    return robotstate.convertStateMessageToDrakePose(lastPlan.plan[-1])
def commitManipPlan(self):
    """Send the most recently added plan to the manipulation planner for execution."""
    self.manipPlanner.commitManipPlan(self.plans[-1])
# Nominal Plans and Execution #############################################
class ValveImageFitter(ImageBasedAffordanceFit):
    """Image-view helper that fits a valve affordance from two user-picked points."""

    def __init__(self, valveDemo):
        # Two picked points are required: they define the valve rim for segmentation.
        ImageBasedAffordanceFit.__init__(self, numberOfPoints=2)
        self.valveDemo = valveDemo

    def onImageViewDoubleClick(self, displayPoint, modifiers, imageView):
        # Forward double clicks to the demo object, which owns the picking logic.
        self.valveDemo.onImageViewDoubleClick(displayPoint, modifiers, imageView)

    def fit(self, polyData, points):
        # Remove any previous valve affordance before segmenting a new one.
        om.removeFromObjectModel(om.findObjectByName('valve'))
        segmentation.segmentValveByRim(polyData, points[0], points[1])
class ValveTaskPanel(TaskUserPanel):
    """Task panel UI for the valve-turning demo.

    Exposes manual buttons for each planning step, user-tunable properties
    (hand, turn direction, valve size, base/back locking) and an autonomous
    task tree that sequences fit, walk, grasp, turn and retract.
    """

    def __init__(self, valveDemo):
        TaskUserPanel.__init__(self, windowTitle='Valve Task')

        self.valveDemo = valveDemo
        self.fitter = ValveImageFitter(self.valveDemo)
        self.initImageView(self.fitter.imageView)

        self.addDefaultProperties()
        self.addButtons()
        self.addTasks()

    def addButtons(self):
        """Create the manual-operation buttons, in execution order."""
        self.addManualButton('Spawn Valve', self.onSpawnValveClicked)
        self.addManualSpacer()
        self.addManualButton('Footsteps', self.valveDemo.planFootstepsToStance)
        self.addManualButton('Footsteps (IK)',
                             functools.partial(self.valveDemo.planFootstepsToStance,
                                               useIkTraj=True))
        self.addManualSpacer()
        self.addManualButton('Raise arm', self.valveDemo.planPreGrasp)
        self.addManualButton('Set fingers', self.setFingers)
        self.addManualSpacer()
        self.addManualButton('Reach', self.reach)
        self.addManualButton('Touch', self.touch)
        self.addManualButton('Turn', self.turnValve)
        self.addManualButton('Retract', self.retract)
        self.addManualSpacer()
        self.addManualButton('Nominal', self.valveDemo.planNominal)
        self.addManualSpacer()
        self.addManualButton('Commit Manip', self.valveDemo.commitManipPlan)

    def onSpawnValveClicked(self):
        self.valveDemo.spawnValveAffordance()

    def setFingers(self):
        # Preposition the grasping hand's fingers to the demo's open amount.
        driver = self.valveDemo.getHandDriver(self.valveDemo.graspingHand)
        driver.sendClose(self.valveDemo.openAmount)

    def reach(self):
        # Wrist angle depends on the configured turn direction (see _syncProperties).
        self.valveDemo.setReachAndTouchPoses()
        self.valveDemo.planReach(wristAngleCW=self.initialWristAngleCW)

    def touch(self):
        self.valveDemo.planTouch(wristAngleCW=self.initialWristAngleCW)

    def turnValve(self):
        self.valveDemo.planTurn(wristAngleCW=self.finalWristAngleCW)

    def retract(self):
        self.valveDemo.planRetract()

    def addDefaultProperties(self):
        """Declare the user-editable properties and sync them into the demo."""
        self.params.addProperty('Hand', 1, attributes=om.PropertyAttributes(enumNames=['Left',
                                                                                       'Right']))
        self.params.addProperty('Turn direction', 1,
                                attributes=om.PropertyAttributes(enumNames=['Clockwise',
                                                                            'Counter clockwise']))
        self.params.addProperty('Valve size', 0,
                                attributes=om.PropertyAttributes(enumNames=['Large', 'Small']))
        self.params.addProperty('Base', 0,
                                attributes=om.PropertyAttributes(enumNames=['Fixed', 'Free']))
        self.params.addProperty('Back', 1,
                                attributes=om.PropertyAttributes(enumNames=['Fixed', 'Free']))
        self._syncProperties()

    def onPropertyChanged(self, propertySet, propertyName):
        # Any property change invalidates the task tree, so rebuild it.
        self._syncProperties()
        self.taskTree.removeAllTasks()
        self.addTasks()

    def _syncProperties(self):
        """Push the panel's property values into the valve demo object."""
        self.valveDemo.planFromCurrentRobotState = True
        self.valveDemo.setGraspingHand(self.params.getPropertyEnumValue('Hand').lower())
        # The wrist sweeps between 0 and 320 degrees; the direction decides
        # which end it starts from.
        if self.params.getPropertyEnumValue('Turn direction') == 'Clockwise':
            self.valveDemo.scribeDirection = 1
            self.initialWristAngleCW = 0
            self.finalWristAngleCW = np.radians(320)
        else:
            self.valveDemo.scribeDirection = -1
            self.initialWristAngleCW = np.radians(320)
            self.finalWristAngleCW = 0
        if self.params.getPropertyEnumValue('Valve size') == 'Large':
            self.valveDemo.useLargeValveDefaults()
        else:
            self.valveDemo.useSmallValveDefaults()
        if self.params.getPropertyEnumValue('Base') == 'Fixed':
            self.valveDemo.lockBase = True
        else:
            self.valveDemo.lockBase = False
        if self.params.getPropertyEnumValue('Back') == 'Fixed':
            self.valveDemo.lockBack = True
        else:
            self.valveDemo.lockBack = False

    def addTasks(self):
        """Build the autonomous task tree for the current property settings."""

        # some helpers
        def addTask(task, parent=None):
            self.taskTree.onAddTask(task, copy=False, parent=parent)

        def addFunc(func, name, parent=None):
            addTask(rt.CallbackTask(callback=func, name=name), parent=parent)

        def addManipulation(func, name, parent=None):
            # Standard plan -> check -> execute -> wait -> confirm sequence.
            group = self.taskTree.addGroup(name, parent=parent)
            addFunc(func, name='plan motion', parent=group)
            addTask(rt.CheckPlanInfo(name='check manip plan info'), parent=group)
            addFunc(v.commitManipPlan, name='execute manip plan', parent=group)
            addTask(rt.WaitForManipulationPlanExecution(name='wait for manip execution'),
                    parent=group)
            addTask(rt.UserPromptTask(name='Confirm execution has finished', message='Continue when plan finishes.'),
                    parent=group)

        def addLargeValveTurn(parent=None):
            # Large valve: the hand stays inserted between the spokes, no grasp needed.
            group = self.taskTree.addGroup('Valve Turn', parent=parent)
            # valve manip actions
            addManipulation(functools.partial(v.planReach, wristAngleCW=self.initialWristAngleCW),
                            name='Reach to valve', parent=group)
            addManipulation(functools.partial(v.planTouch, wristAngleCW=self.initialWristAngleCW),
                            name='Insert hand', parent=group)
            addManipulation(functools.partial(v.planTurn, wristAngleCW=self.finalWristAngleCW),
                            name='Turn valve', parent=group)
            addManipulation(v.planRetract, name='Retract hand', parent=group)

        def addSmallValveTurn(parent=None):
            # Small valve: additionally grasp before turning and release after.
            group = self.taskTree.addGroup('Valve Turn', parent=parent)
            side = 'Right' if v.graspingHand == 'right' else 'Left'
            addManipulation(functools.partial(v.planReach, wristAngleCW=self.initialWristAngleCW),
                            name='Reach to valve', parent=group)
            addManipulation(functools.partial(v.planTouch, wristAngleCW=self.initialWristAngleCW),
                            name='Insert hand', parent=group)
            addTask(rt.CloseHand(name='grasp valve', side=side, mode='Basic',
                                 amount=self.valveDemo.closedAmount),
                    parent=group)
            addManipulation(functools.partial(v.planTurn, wristAngleCW=self.finalWristAngleCW),
                            name='plan turn valve', parent=group)
            addTask(rt.CloseHand(name='release valve', side=side, mode='Basic',
                                 amount=self.valveDemo.openAmount),
                    parent=group)
            addManipulation(v.planRetract, name='plan retract', parent=group)

        v = self.valveDemo

        self.taskTree.removeAllTasks()
        side = self.params.getPropertyEnumValue('Hand')

        ###############
        # add the tasks

        # prep
        prep = self.taskTree.addGroup('Preparation')
        addTask(rt.CloseHand(name='close left hand', side='Left'), parent=prep)
        addTask(rt.CloseHand(name='close right hand', side='Right'), parent=prep)

        # fit
        fit = self.taskTree.addGroup('Fitting')
        addTask(rt.UserPromptTask(name='fit valve',
                                  message='Please fit and approve valve affordance.'), parent=fit)
        addTask(rt.FindAffordance(name='check valve affordance', affordanceName='valve'),
                parent=fit)

        # walk
        walk = self.taskTree.addGroup('Approach')
        addFunc(v.planFootstepsToStance, 'plan walk to valve', parent=walk)
        addTask(rt.UserPromptTask(name='approve footsteps',
                                  message='Please approve footstep plan.'), parent=walk)
        addTask(rt.CommitFootstepPlan(name='walk to valve',
                                      planName='valve grasp stance footstep plan'), parent=walk)
        addTask(rt.SetNeckPitch(name='set neck position', angle=35), parent=walk)
        addTask(rt.WaitForWalkExecution(name='wait for walking'), parent=walk)

        # refit
        refit = self.taskTree.addGroup('Re-fitting')
        addTask(rt.UserPromptTask(name='fit valve',
                                  message='Please fit and approve valve affordance.'),
                parent=refit)

        # set fingers
        addTask(rt.CloseHand(name='set finger positions', side=side, mode='Basic',
                             amount=self.valveDemo.openAmount), parent=refit)

        # add valve turns: two full turn cycles are queued.
        if v.smallValve:
            for i in range(0, 2):
                addSmallValveTurn()
        else:
            for i in range(0, 2):
                addLargeValveTurn()

        # go to finishing posture
        prep = self.taskTree.addGroup('Prep for walking')
        addTask(rt.CloseHand(name='close left hand', side='Left'), parent=prep)
        addTask(rt.CloseHand(name='close right hand', side='Right'), parent=prep)
        addTask(rt.PlanPostureGoal(name='plan walk posture', postureGroup='General',
                                   postureName='safe nominal', side='Default'), parent=prep)
        addTask(rt.CommitManipulationPlan(name='execute manip plan',
                                          planName='safe nominal posture plan'), parent=prep)
        addTask(rt.WaitForManipulationPlanExecution(name='wait for manip execution'), parent=prep)
| bsd-3-clause |
jordanemedlock/psychtruths | temboo/core/Library/Amazon/EC2/DeleteSnapshot.py | 5 | 4191 | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteSnapshot
# Deletes a snapshot using a snapshot id that you specify.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteSnapshot(Choreography):
    """Choreo that deletes an EC2 snapshot identified by a snapshot id."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the DeleteSnapshot Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(DeleteSnapshot, self).__init__(temboo_session, '/Library/Amazon/EC2/DeleteSnapshot')

    def new_input_set(self):
        # Factory for the typed input set used to configure this Choreo.
        return DeleteSnapshotInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific result set.
        return DeleteSnapshotResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Create the typed execution handle for an in-flight run of this Choreo.
        return DeleteSnapshotChoreographyExecution(session, exec_id, path)
class DeleteSnapshotInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the DeleteSnapshot
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        self._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        self._set_input('AWSSecretKeyId', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
        """
        self._set_input('ResponseFormat', value)

    def set_SnapshotId(self, value):
        """
        Set the value of the SnapshotId input for this Choreo. ((required, string) The id of the snapshot to delete.)
        """
        self._set_input('SnapshotId', value)

    def set_UserRegion(self, value):
        """
        Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the EC2 endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)
        """
        self._set_input('UserRegion', value)
class DeleteSnapshotResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the DeleteSnapshot Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        """Deserialize a JSON string into Python objects."""
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
        """
        return self._output.get('Response')
class DeleteSnapshotChoreographyExecution(ChoreographyExecution):
    """Execution handle for a running DeleteSnapshot Choreo."""

    def _make_result_set(self, response, path):
        # Wrap the final response in the Choreo-specific result set.
        return DeleteSnapshotResultSet(response, path)
| apache-2.0 |
MyAOSP/external_chromium_org_third_party_skia | tools/test_pdfs.py | 231 | 1801 | '''
Compares the rendererings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
import test_rendering
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PDF files, and then compares
those resulting PDF files against PDF files found in expectedDir.
Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''


def Main(args):
    """Allow other scripts to call this script with fake command-line args.

    @param args the command-line argument list, including the program name at
                index 0 (i.e. in the shape of sys.argv)
    """
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option('--render_dir', dest='render_dir',
                      help=('specify the location to output the rendered '
                            'files. Default is a temp directory.'))
    parser.add_option('--diff_dir', dest='diff_dir',
                      help=('specify the location to output the diff files. '
                            'Default is a temp directory.'))
    options, arguments = parser.parse_args(args)

    # arguments[0] is the program name; at least one input plus the expected
    # directory must follow it.
    if len(arguments) < 3:
        # Fixed typo in the user-facing message ("ouput" -> "output").
        print("Expected at least one input and one output folder.")
        parser.print_help()
        sys.exit(-1)

    inputs = arguments[1:-1]
    expected_dir = arguments[-1]
    test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
                                  options.diff_dir, 'render_pdfs', '')
| bsd-3-clause |
remitamine/youtube-dl | youtube_dl/extractor/eagleplatform.py | 23 | 7736 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
unsmuggle_url,
url_or_none,
)
class EaglePlatformIE(InfoExtractor):
    """Extractor for videos served from *.media.eagleplatform.com.

    Accepts either a direct player URL with a record_id query parameter or an
    internal 'eagleplatform:<host>:<id>' reference produced by _extract_url.
    """
    _VALID_URL = r'''(?x)
                    (?:
                        eagleplatform:(?P<custom_host>[^/]+):|
                        https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id=
                    )
                    (?P<id>\d+)
                '''
    _TESTS = [{
        # http://lenta.ru/news/2015/03/06/navalny/
        'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
        # Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
        'info_dict': {
            'id': '227304',
            'ext': 'mp4',
            'title': 'Навальный вышел на свободу',
            'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 87,
            'view_count': int,
            'age_limit': 0,
        },
    }, {
        # http://muz-tv.ru/play/7129/
        # http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true
        'url': 'eagleplatform:media.clipyou.ru:12820',
        'md5': '358597369cf8ba56675c1df15e7af624',
        'info_dict': {
            'id': '12820',
            'ext': 'mp4',
            'title': "'O Sole Mio",
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 216,
            'view_count': int,
        },
        'skip': 'Georestricted',
    }, {
        # referrer protected video (https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/)
        'url': 'eagleplatform:tvrainru.media.eagleplatform.com:582306',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_url(webpage):
        """Return an embed URL or eagleplatform: reference found in webpage,
        trying each known embed style in turn; returns None implicitly when
        nothing matches."""
        # Regular iframe embedding
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//.+?\.media\.eagleplatform\.com/index/player\?.+?)\1',
            webpage)
        if mobj is not None:
            return mobj.group('url')
        # Shared verbose-regex fragment matching the player.js <script> tag;
        # interpolated into the (?xs) patterns below.
        PLAYER_JS_RE = r'''
                        <script[^>]+
                            src=(?P<qjs>["\'])(?:https?:)?//(?P<host>(?:(?!(?P=qjs)).)+\.media\.eagleplatform\.com)/player/player\.js(?P=qjs)
                        .+?
                    '''
        # "Basic usage" embedding (see http://dultonmedia.github.io/eplayer/)
        mobj = re.search(
            r'''(?xs)
                    %s
                    <div[^>]+
                        class=(?P<qclass>["\'])eagleplayer(?P=qclass)[^>]+
                        data-id=["\'](?P<id>\d+)
            ''' % PLAYER_JS_RE, webpage)
        if mobj is not None:
            return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict()
        # Generalization of "Javascript code usage", "Combined usage" and
        # "Usage without attaching to DOM" embeddings (see
        # http://dultonmedia.github.io/eplayer/)
        mobj = re.search(
            r'''(?xs)
                    %s
                    <script>
                    .+?
                    new\s+EaglePlayer\(
                        (?:[^,]+\s*,\s*)?
                        {
                            .+?
                            \bid\s*:\s*["\']?(?P<id>\d+)
                            .+?
                        }
                    \s*\)
                    .+?
                    </script>
            ''' % PLAYER_JS_RE, webpage)
        if mobj is not None:
            return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict()

    @staticmethod
    def _handle_error(response):
        """Raise ExtractorError when an API payload carries a non-200 status."""
        status = int_or_none(response.get('status', 200))
        if status != 200:
            raise ExtractorError(' '.join(response['errors']), expected=True)

    def _download_json(self, url_or_request, video_id, *args, **kwargs):
        # On HTTP errors, attempt to parse the error body and surface the
        # API-level error message before re-raising.
        try:
            response = super(EaglePlatformIE, self)._download_json(
                url_or_request, video_id, *args, **kwargs)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError):
                response = self._parse_json(ee.cause.read().decode('utf-8'), video_id)
                self._handle_error(response)
            raise
        return response

    def _get_video_url(self, url_or_request, video_id, note='Downloading JSON metadata'):
        # The media endpoints wrap the actual URL in a one-element 'data' list.
        return self._download_json(url_or_request, video_id, note)['data'][0]

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id')

        headers = {}
        query = {
            'id': video_id,
        }

        # Referrer-protected videos require the referrer both as a header and
        # as a query parameter.
        referrer = smuggled_data.get('referrer')
        if referrer:
            headers['Referer'] = referrer
            query['referrer'] = referrer

        player_data = self._download_json(
            'http://%s/api/player_data' % host, video_id,
            headers=headers, query=query)

        media = player_data['data']['playlist']['viewports'][0]['medialist'][0]

        title = media['title']
        description = media.get('description')
        thumbnail = self._proto_relative_url(media.get('snapshot'), 'http:')
        duration = int_or_none(media.get('duration'))
        view_count = int_or_none(media.get('views'))

        age_restriction = media.get('age_restriction')
        age_limit = None
        if age_restriction:
            age_limit = 0 if age_restriction == 'allow_all' else 18

        secure_m3u8 = self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:')

        formats = []

        m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON')
        m3u8_formats = self._extract_m3u8_formats(
            m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls', fatal=False)
        formats.extend(m3u8_formats)

        # Index the HLS formats by height so matching progressive MP4 formats
        # can inherit their metadata below.
        m3u8_formats_dict = {}
        for f in m3u8_formats:
            if f.get('height') is not None:
                m3u8_formats_dict[f['height']] = f

        mp4_data = self._download_json(
            # Secure mp4 URL is constructed according to Player.prototype.mp4 from
            # http://lentaru.media.eagleplatform.com/player/player.js
            re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4s', secure_m3u8),
            video_id, 'Downloading mp4 JSON', fatal=False)
        if mp4_data:
            for format_id, format_url in mp4_data.get('data', {}).items():
                if not url_or_none(format_url):
                    continue
                height = int_or_none(format_id)
                if height is not None and m3u8_formats_dict.get(height):
                    f = m3u8_formats_dict[height].copy()
                    f.update({
                        'format_id': f['format_id'].replace('hls', 'http'),
                        'protocol': 'http',
                    })
                else:
                    f = {
                        'format_id': 'http-%s' % format_id,
                        'height': int_or_none(format_id),
                    }
                f['url'] = format_url
                formats.append(f)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense |
Whatever4783/Printrun | printrun/zscaper.py | 20 | 4787 | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
from stltool import stl, genfacet, emitstl
# A wx.App instance must exist before wx.Image can be used below; it is
# deleted again at the bottom of the script ('del a').
a = wx.App()
def genscape(data = [[0, 1, 0, 0], [1, 0, 2, 0], [1, 0, 0, 0], [0, 1, 0, 1]],
             pscale = 1.0, bheight = 1.0, zscale = 1.0):
    """Build a closed heightfield ("landscape") mesh from a 2D grid of samples.

    data    -- 2D list of height samples (only read, so the mutable default is
               safe here, though still a Python anti-pattern)
    pscale  -- horizontal spacing between grid samples
    bheight -- height of the solid base under the landscape
    zscale  -- multiplier applied to each height sample

    Python 2 only: relies on zip() returning a subscriptable list.
    """
    o = stl(None)
    datal = len(data)
    datah = len(data[0])
    # create bottom:
    bmidpoint = (pscale * (datal - 1) / 2.0, pscale * (datah - 1) / 2.0)
    # print range(datal), bmidpoint
    # Walk consecutive index pairs along the two x-facing edges: emit bottom
    # fan triangles (toward the midpoint) and the vertical side walls.
    for i in zip(range(datal + 1)[:-1], range(datal + 1)[1:])[:-1]:
        # print (pscale*i[0], pscale*i[1])
        o.facets += [[[0, 0, -1], [[0.0, pscale * i[0], 0.0], [0.0, pscale * i[1], 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [[[0, 0, -1], [[2.0 * bmidpoint[1], pscale * i[1], 0.0], [2.0 * bmidpoint[1], pscale * i[0], 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [genfacet([[0.0, pscale * i[0], data[i[0]][0] * zscale + bheight], [0.0, pscale * i[1], data[i[1]][0] * zscale + bheight], [0.0, pscale * i[1], 0.0]])]
        o.facets += [genfacet([[2.0 * bmidpoint[1], pscale * i[1], data[i[1]][datah - 1] * zscale + bheight], [2.0 * bmidpoint[1], pscale * i[0], data[i[0]][datah - 1] * zscale + bheight], [2.0 * bmidpoint[1], pscale * i[1], 0.0]])]
        o.facets += [genfacet([[0.0, pscale * i[0], data[i[0]][0] * zscale + bheight], [0.0, pscale * i[1], 0.0], [0.0, pscale * i[0], 0.0]])]
        o.facets += [genfacet([[2.0 * bmidpoint[1], pscale * i[1], 0.0], [2.0 * bmidpoint[1], pscale * i[0], data[i[0]][datah - 1] * zscale + bheight], [2.0 * bmidpoint[1], pscale * i[0], 0.0]])]
    # Same treatment for the two y-facing edges.
    for i in zip(range(datah + 1)[: - 1], range(datah + 1)[1:])[: - 1]:
        # print (pscale * i[0], pscale * i[1])
        o.facets += [[[0, 0, -1], [[pscale * i[1], 0.0, 0.0], [pscale * i[0], 0.0, 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [[[0, 0, -1], [[pscale * i[0], 2.0 * bmidpoint[0], 0.0], [pscale * i[1], 2.0 * bmidpoint[0], 0.0], [bmidpoint[0], bmidpoint[1], 0.0]]]]
        o.facets += [genfacet([[pscale * i[1], 0.0, data[0][i[1]] * zscale + bheight], [pscale * i[0], 0.0, data[0][i[0]] * zscale + bheight], [pscale * i[1], 0.0, 0.0]])]
        o.facets += [genfacet([[pscale * i[0], 2.0 * bmidpoint[0], data[datal - 1][i[0]] * zscale + bheight], [pscale * i[1], 2.0 * bmidpoint[0], data[datal - 1][i[1]] * zscale + bheight], [pscale * i[1], 2.0 * bmidpoint[0], 0.0]])]
        o.facets += [genfacet([[pscale * i[1], 0.0, 0.0], [pscale * i[0], 0.0, data[0][i[0]] * zscale + bheight], [pscale * i[0], 0.0, 0.0]])]
        o.facets += [genfacet([[pscale * i[0], 2.0 * bmidpoint[0], data[datal - 1][i[0]] * zscale + bheight], [pscale * i[1], 2.0 * bmidpoint[0], 0.0], [pscale * i[0], 2.0 * bmidpoint[0], 0.0]])]
    # Top surface: two triangles per grid cell.
    for i in xrange(datah - 1):
        for j in xrange(datal - 1):
            o.facets += [genfacet([[pscale * i, pscale * j, data[j][i] * zscale + bheight], [pscale * (i + 1), pscale * (j), data[j][i + 1] * zscale + bheight], [pscale * (i + 1), pscale * (j + 1), data[j + 1][i + 1] * zscale + bheight]])]
            o.facets += [genfacet([[pscale * (i), pscale * (j + 1), data[j + 1][i] * zscale + bheight], [pscale * i, pscale * j, data[j][i] * zscale + bheight], [pscale * (i + 1), pscale * (j + 1), data[j + 1][i + 1] * zscale + bheight]])]
            # print o.facets[-1]
    return o
def zimage(name, out):
    """Convert a grayscale image file into a heightfield STL (Python 2 only).

    Reads image 'name' with wx, takes every third byte of the RGB data (the
    red channel) as a height sample, and writes the landscape mesh to 'out'.
    """
    i = wx.Image(name)
    s = i.GetSize()
    print len(map(ord, i.GetData()[::3]))
    b = map(ord, i.GetData()[::3])
    data = []
    # NOTE(review): this loop reuses 'i', shadowing the wx.Image above;
    # harmless since the image is no longer needed at this point.
    for i in xrange(s[0]):
        data += [b[i * s[1]:(i + 1) * s[1]]]
    # data = [i[::5] for i in data[::5]]
    emitstl(out, genscape(data, zscale = 0.1).facets, name)
"""
class scapewin(wx.Frame):
def __init__(self, size = (400, 530)):
wx.Frame.__init__(self, None,
title = "Right-click to load an image", size = size)
self.SetIcon(wx.Icon("plater.png", wx.BITMAP_TYPE_PNG))
self.SetClientSize(size)
self.panel = wx.Panel(self, size = size)
"""
if __name__ == '__main__':
    """
    app = wx.App(False)
    main = scapewin()
    main.Show()
    app.MainLoop()
    """
    # The GUI variant above is disabled; just run the sample conversion.
    zimage("catposthtmap2.jpg", "testobj.stl")
# Release the module-level wx.App created at import time.
del a
| gpl-3.0 |
pap/nupic | tests/unit/nupic/research/tp_constant_test.py | 35 | 4796 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file tests that we can learn and predict the particularly vexing case of a
single constant signal!
"""
import numpy as np
import unittest2 as unittest
from nupic.research import fdrutilities as fdrutils
from nupic.research.TP import TP
from nupic.research.TP10X2 import TP10X2
_SEED = 42
VERBOSITY = 1

# Seed numpy's global RNG up front so test data generation is reproducible.
np.random.seed(_SEED)
def _printOneTrainingVector(x):
  # Python 2 print statement: renders nonzero bits as '1' and zeros as '.'.
  "Print a single vector succinctly."
  print ''.join('1' if k != 0 else '.' for k in x)
def _getSimplePatterns(numOnes, numPatterns):
"""Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector. These patterns
are used as elements of sequences when building up a training set."""
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
x = np.zeros(numCols, dtype='float32')
x[i*numOnes:(i + 1)*numOnes] = 1
p.append(x)
return p
def _createTps(numCols):
  """Create two instances of temporal poolers (TP.py and TP10X2.py) with
  identical parameter settings, so their behavior can be compared."""

  # Keep these fixed:
  minThreshold = 4
  activationThreshold = 5
  newSynapseCount = 7
  initialPerm = 0.3
  connectedPerm = 0.5
  permanenceInc = 0.1
  permanenceDec = 0.05
  globalDecay = 0
  cellsPerColumn = 1

  # C++-backed implementation; extra consistency checking enabled since this
  # instance is compared against the pure-Python one.
  cppTp = TP10X2(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
                 initialPerm=initialPerm, connectedPerm=connectedPerm,
                 minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                 permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                 activationThreshold=activationThreshold,
                 globalDecay=globalDecay, burnIn=1,
                 seed=_SEED, verbosity=VERBOSITY,
                 checkSynapseConsistency=True,
                 pamLength=1000)

  # Ensure we are copying over learning states for TPDiff
  cppTp.retrieveLearningStates = True

  # Pure-Python implementation with the same parameters.
  pyTp = TP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
            initialPerm=initialPerm, connectedPerm=connectedPerm,
            minThreshold=minThreshold, newSynapseCount=newSynapseCount,
            permanenceInc=permanenceInc, permanenceDec=permanenceDec,
            activationThreshold=activationThreshold,
            globalDecay=globalDecay, burnIn=1,
            seed=_SEED, verbosity=VERBOSITY,
            pamLength=1000)

  return cppTp, pyTp
class TPConstantTest(unittest.TestCase):
  """Checks that both TP implementations learn and predict a constant signal.

  Python 2 module (uses print statements).
  """

  def setUp(self):
    # Fresh C++ and Python temporal poolers with identical parameters.
    self.cppTp, self.pyTp = _createTps(100)

  def _basicTest(self, tp=None):
    """Test creation, pickling, and basic run of learning and inference."""

    trainingSet = _getSimplePatterns(10, 10)

    # Learn on several constant sequences, with a reset in between
    for _ in range(2):
      for seq in trainingSet[0:5]:
        for _ in range(10):
          tp.learn(seq)
        tp.reset()

    print "Learning completed"

    # Infer
    print "Running inference"

    tp.collectStats = True
    for seq in trainingSet[0:5]:
      tp.reset()
      tp.resetStats()
      for _ in range(10):
        tp.infer(seq)
        if VERBOSITY > 1 :
          print
          _printOneTrainingVector(seq)
          tp.printStates(False, False)
          print
          print
      if VERBOSITY > 1:
        print tp.getStats()

      # Ensure our predictions are accurate for each sequence
      self.assertGreater(tp.getStats()['predictionScoreAvg2'], 0.8)
      print ("tp.getStats()['predictionScoreAvg2'] = ",
             tp.getStats()['predictionScoreAvg2'])

    print "TPConstant basicTest ok"

  def testCppTpBasic(self):
    self._basicTest(self.cppTp)

  def testPyTpBasic(self):
    self._basicTest(self.pyTp)

  def testIdenticalTps(self):
    # The two implementations must end up in identical states after setUp.
    self.assertTrue(fdrutils.tpDiff2(self.cppTp, self.pyTp))
# Script entry point: run all tests in this module.
if __name__=="__main__":
  unittest.main()
| agpl-3.0 |
beppec56/core | testtools/source/bridgetest/pyuno/core.py | 5 | 17624 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
import pyuno
import uno
import unittest
import exceptions
import types
def suite(ctx):
    """Build the bridgetest suite, handing the component context to each test."""
    names = [
        "testErrors",
        "testBaseTypes",
        "testOutparam",
        "testStruct",
        "testType",
        "testEnum",
        "testBool",
        "testChar",
        "testUnicode",
        "testConstant",
        "testExceptions",
        "testInterface",
        "testByteSequence",
        "testInvoke",
    ]
    result = unittest.TestSuite()
    for name in names:
        result.addTest(TestCase(name, ctx))
    return result
def equalsEps(a, b, eps):
    """Return 1 when a and b differ by no more than eps, else 0."""
    return int(abs(a - b) <= eps)
def assign(rData, bBool, cChar, nByte, nShort, nUShort, nLong, nULong, nHyper,
           nUHyper, fFloat, fDouble, eEnum, rStr, xTest, rAny):
    """Copy the given field values onto the TestData-like struct rData."""
    fields = (
        ("Bool", bBool), ("Char", cChar), ("Byte", nByte), ("Short", nShort),
        ("UShort", nUShort), ("Long", nLong), ("ULong", nULong),
        ("Hyper", nHyper), ("UHyper", nUHyper), ("Float", fFloat),
        ("Double", fDouble), ("Enum", eEnum), ("String", rStr),
        ("Interface", xTest), ("Any", rAny),
    )
    for fieldName, fieldValue in fields:
        setattr(rData, fieldName, fieldValue)
class PythonTransporter:
    """Minimal Python object exporting transportAny for uno.invoke tests."""

    def __init__(self):
        pass

    def transportAny(self, arg):
        # Echo the value back unchanged.
        return arg
class TestCase( unittest.TestCase):
def __init__(self,method,ctx):
unittest.TestCase.__init__(self,method)
self.ctx = ctx
def setUp(self):
# the testcomponent from the testtools project
self.tobj = self.ctx.ServiceManager.createInstanceWithContext(
'com.sun.star.test.bridge.CppTestObject' , self.ctx )
self.tobj.Bool = 1
self.tobj.Char = 'h'
self.tobj.Byte = 43
self.tobj.Short = -42
self.tobj.UShort = 44
self.tobj.Long = 42
self.tobj.ULong = 41
self.tobj.Hyper = 46
self.tobj.UHyper = 47
self.tobj.Float = 4.3
self.tobj.Double = 4.2
self.tobj.Enum = 4
self.tobj.String = "yabadabadoo"
self.tobj.Interface = self.ctx
self.tobj.Any = self.tobj.String
mystruct = uno.createUnoStruct( "test.testtools.bridgetest.TestData" )
assign( mystruct, 1, 'h', 43, -42,44,42,41,46,47,4.3,4.2,4,"yabadabadoo",self.ctx,"yabadabadoo")
self.tobj.Struct = mystruct
self.testElement = uno.createUnoStruct( "test.testtools.bridgetest.TestElement" )
self.testElement.String = "foo"
self.testElement2 = uno.createUnoStruct( "test.testtools.bridgetest.TestElement" )
self.testElement2.String = "42"
self.tobj.Sequence = (self.testElement,self.testElement2)
def testBaseTypes(self):
self.failUnless( 42 == self.tobj.Long , "Long attribute" )
self.failUnless( 41 == self.tobj.ULong , "ULong attribute" )
self.failUnless( 43 == self.tobj.Byte , "Byte attribute" )
self.failUnless( 44 == self.tobj.UShort , "UShort attribute" )
self.failUnless( -42 == self.tobj.Short , "Short attribute" )
self.failUnless( 46 == self.tobj.Hyper , "Hyper attribute" )
self.failUnless( 47 == self.tobj.UHyper , "UHyper attribute" )
self.failUnless( self.tobj.Bool , "Bool attribute2" )
self.failUnless( "yabadabadoo" == self.tobj.String , "String attribute" )
self.failUnless( self.tobj.Sequence[0] == self.testElement , "Sequence test")
self.failUnless( self.tobj.Sequence[1] == self.testElement2 , "Sequence2 test")
self.failUnless( equalsEps( 4.3,self.tobj.Float,0.0001) , "float test" )
self.failUnless( 4.2 == self.tobj.Double , "double test" )
self.failUnless( self.ctx == self.tobj.Interface ,
"object identity test with C++ object" )
self.failUnless( not self.ctx == self.tobj , "object not identical test " )
self.failUnless( 42 == self.tobj.transportAny( 42 ), "transportAny long" )
self.failUnless( "woo, this is python" == self.tobj.transportAny( "woo, this is python" ), \
"string roundtrip via any test" )
def testEnum( self ):
e1 = uno.Enum( "com.sun.star.uno.TypeClass" , "LONG" )
e2 = uno.Enum( "com.sun.star.uno.TypeClass" , "LONG" )
e3 = uno.Enum( "com.sun.star.uno.TypeClass" , "UNSIGNED_LONG" )
e4 = uno.Enum( "test.testtools.bridgetest.TestEnum" , "TWO" )
self.failUnless( e1 == e2 , "equal enum test" )
self.failUnless( not (e1 == e3) , "different enums test" )
self.failUnless( self.tobj.transportAny( e3 ) == e3, "enum roundtrip test" )
self.tobj.Enum = e4
self.failUnless( e4 == self.tobj.Enum , "enum assignment failed" )
def testType(self ):
t1 = uno.getTypeByName( "com.sun.star.lang.XComponent" )
t2 = uno.getTypeByName( "com.sun.star.lang.XComponent" )
t3 = uno.getTypeByName( "com.sun.star.lang.EventObject" )
self.failUnless( t1.typeClass == \
uno.Enum( "com.sun.star.uno.TypeClass", "INTERFACE" ), "typeclass of type test" )
self.failUnless( t3.typeClass == \
uno.Enum( "com.sun.star.uno.TypeClass", "STRUCT" ), "typeclass of type test")
self.failUnless( t1 == t2 , "equal type test" )
self.failUnless( t1 == t2 , "equal type test" )
self.failUnless( t1 == self.tobj.transportAny( t1 ), "type roundtrip test" )
def testBool( self ):
self.failUnless( uno.Bool(1) , "uno.Bool true test" )
self.failUnless( not uno.Bool(0) , "uno.Bool false test" )
self.failUnless( uno.Bool( "true") , "uno.Bool true1 test" )
self.failUnless( not uno.Bool( "false") , "uno.Bool true1 test" )
self.tobj.Bool = uno.Bool(1)
self.failUnless( self.tobj.Bool , "bool true attribute test" )
self.tobj.Bool = uno.Bool(0)
self.failUnless( not self.tobj.Bool , "bool true attribute test" )
# new boolean semantic
self.failUnless( id( self.tobj.transportAny( True ) ) == id(True) , "boolean preserve test")
self.failUnless( id( self.tobj.transportAny( False ) ) == id(False) , "boolean preserve test" )
self.failUnless( id( self.tobj.transportAny(1) ) != id( True ), "boolean preserve test" )
self.failUnless( id( self.tobj.transportAny(0) ) != id( False ), "boolean preserve test" )
def testChar( self ):
self.tobj.Char = uno.Char( u'h' )
self.failUnless( self.tobj.Char == uno.Char( u'h' ), "char type test" )
self.failUnless( isinstance( self.tobj.transportAny( uno.Char(u'h') ),uno.Char),"char preserve test" )
def testStruct( self ):
mystruct = uno.createUnoStruct( "test.testtools.bridgetest.TestData" )
assign( mystruct, 1, 'h', 43, -42,44,42,41,46,47,4.3,4.2,4,"yabadabadoo",self.ctx,"yabadabadoo")
self.tobj.Struct = mystruct
aSecondStruct = self.tobj.Struct
self.failUnless( self.tobj.Struct == mystruct, "struct roundtrip for equality test" )
self.failUnless( aSecondStruct == mystruct, "struct roundtrip for equality test2" )
aSecondStruct.Short = 720
self.failUnless( not aSecondStruct == mystruct , "different structs equality test" )
self.failUnless( not self.ctx == mystruct , "object is not equal to struct test" )
self.failUnless( mystruct == self.tobj.transportAny( mystruct ), "struct roundtrip with any test" )
my2ndstruct = uno.createUnoStruct( "test.testtools.bridgetest.TestData", \
1, 'h', 43, -42,44,42,41,46,47,4.3,4.2,4,"yabadabadoo",self.ctx,"yabadabadoo",())
self.failUnless( my2ndstruct == mystruct, "struct non-default ctor test" )
def testUnicode( self ):
uni = u'\0148'
self.tobj.String = uni
self.failUnless( uni == self.tobj.String )
self.tobj.String = u'dubidu'
self.failUnless( u'dubidu' == self.tobj.String , "unicode comparison test")
self.failUnless( 'dubidu' == self.tobj.String , "unicode vs. string comparison test" )
def testConstant( self ):
self.failUnless( uno.getConstantByName( "com.sun.star.beans.PropertyConcept.ATTRIBUTES" ) == 4,\
"constant retrieval test" )
def testExceptions( self ):
unoExc = uno.getClass( "com.sun.star.uno.Exception" )
ioExc = uno.getClass( "com.sun.star.io.IOException" )
dispExc = uno.getClass( "com.sun.star.lang.DisposedException" )
wasHere = 0
try:
raise ioExc( "huhuh" , self.tobj )
except unoExc , instance:
wasHere = 1
self.failUnless( wasHere , "exception test 1" )
wasHere = 0
try:
raise ioExc
except ioExc:
wasHere = 1
else:
self.failUnless( wasHere, "exception test 2" )
wasHere = 0
try:
raise dispExc
except ioExc:
pass
except unoExc:
wasHere = 1
self.failUnless(wasHere, "exception test 3")
illegalArg = uno.getClass( "com.sun.star.lang.IllegalArgumentException" )
wasHere = 0
try:
self.tobj.raiseException( 1 , "foo" , self.tobj )
self.failUnless( 0 , "exception test 5a" )
except ioExc:
self.failUnless( 0 , "exception test 5b" )
except illegalArg, i:
self.failUnless( 1 == i.ArgumentPosition , "exception member test" )
self.failUnless( "foo" == i.Message , "exception member test 2 " )
wasHere = 1
else:
self.failUnless( 0, "except test 5c" )
self.failUnless( wasHere, "illegal argument exception test failed" )
def testInterface(self):
clazz = uno.getClass( "com.sun.star.lang.XComponent" )
self.failUnless( "com.sun.star.lang.XComponent" == clazz.__pyunointerface__ )
self.failUnless( issubclass( clazz, uno.getClass( "com.sun.star.uno.XInterface" ) ) )
self.tobj.Interface = None
def testOutparam( self):
# outparameter
struct, mybool,mychar,mybyte,myshort,myushort,mylong,myulong,myhyper,myuhyper,myfloat, \
mydouble,myenum,mystring,myinterface,myany,myseq,my2ndstruct = self.tobj.getValues( \
None,None,None,None,None,None,None,None,None,None, \
None,None,None,None,None,None,None)
self.failUnless(struct == self.tobj.Struct, "outparam 1 test")
self.failUnless(self.tobj.Bool, "outparam 2 test")
self.failUnless(mychar == self.tobj.Char, "outparam 3 test")
self.failUnless(mybyte == self.tobj.Byte, "outparam 4 test")
self.failUnless(myshort == self.tobj.Short, "outparam 5 test")
self.failUnless(myushort == self.tobj.UShort, "outparam 6 test")
self.failUnless(mylong == self.tobj.Long, "outparam 7 test")
self.failUnless(myulong == self.tobj.ULong, "outparam 8 test")
self.failUnless(myhyper == self.tobj.Hyper, "outparam 9 test")
self.failUnless(myuhyper == self.tobj.UHyper, "outparam 10 test")
self.failUnless(myfloat == self.tobj.Float, "outparam 11 test")
self.failUnless(mydouble == self.tobj.Double, "outparam 12 test")
self.failUnless(myenum == self.tobj.Enum, "outparam 13 test")
self.failUnless(mystring == self.tobj.String, "outparam 14 test")
self.failUnless(myinterface == self.tobj.Interface, "outparam 15 test")
self.failUnless(myany == self.tobj.Any, "outparam 16 test")
self.failUnless(myseq == self.tobj.Sequence, "outparam 17 test")
self.failUnless(my2ndstruct == struct, "outparam 18 test")
# should work, debug on windows, why not
# struct, mybool,mychar,mybyte,myshort,myushort,mylong,myulong,myhyper,myuhyper,myfloat,\
# mydouble,myenum,mystring,myinterface,myany,myseq,my2ndstruct = self.tobj.setValues2( \
# mybool,mychar,mybyte,myshort,myushort,mylong,myulong,myhyper,myuhyper,myfloat,\
# mydouble,myenum,mystring,myinterface,myany,myseq,my2ndstruct)
# self.failUnless(struct == self.tobj.Struct, "outparam 1 test")
# self.failUnless( mybool and self.tobj.Bool, "outparam 2 test")
# self.failUnless(mychar == self.tobj.Char, "outparam 3 test")
# self.failUnless(mybyte == self.tobj.Byte, "outparam 4 test")
# self.failUnless(myshort == self.tobj.Short, "outparam 5 test")
# self.failUnless(myushort == self.tobj.UShort, "outparam 6 test")
# self.failUnless(mylong == self.tobj.Long, "outparam 7 test")
# self.failUnless(myulong == self.tobj.ULong, "outparam 8 test")
# self.failUnless(myhyper == self.tobj.Hyper, "outparam 9 test")
# self.failUnless(myuhyper == self.tobj.UHyper, "outparam 10 test")
# self.failUnless(myfloat == self.tobj.Float, "outparam 11 test")
# self.failUnless(mydouble == self.tobj.Double, "outparam 12 test")
# self.failUnless(myenum == self.tobj.Enum, "outparam 13 test")
# self.failUnless(mystring == self.tobj.String, "outparam 14 test")
# self.failUnless(myinterface == self.tobj.Interface, "outparam 15 test")
# self.failUnless(myany == self.tobj.Any, "outparam 16 test")
# self.failUnless(myseq == self.tobj.Sequence, "outparam 17 test")
# self.failUnless(my2ndstruct == struct, "outparam 18 test")
def testErrors( self ):
wasHere = 0
try:
self.tobj.a = 5
self.fail("attribute a shouldn't exist")
except AttributeError:
wasHere = 1
except IllegalArgumentException:
wasHere = 1
self.failUnless( wasHere, "wrong attribute test" )
IllegalArgumentException = uno.getClass("com.sun.star.lang.IllegalArgumentException" )
RuntimeException = uno.getClass("com.sun.star.uno.RuntimeException" )
# TODO: Remove this once it is done
# wrong number of arguments bug !?
self.failUnlessRaises( IllegalArgumentException, self.tobj.transportAny, 42, 43 )
self.failUnlessRaises( IllegalArgumentException, self.tobj.transportAny )
self.failUnlessRaises( RuntimeException, uno.getClass, "a.b" )
self.failUnlessRaises( RuntimeException, uno.getClass, "com.sun.star.uno.TypeClass" )
self.failUnlessRaises( RuntimeException, uno.Enum, "a" , "b" )
self.failUnlessRaises( RuntimeException, uno.Enum, "com.sun.star.uno.TypeClass" , "b" )
self.failUnlessRaises( RuntimeException, uno.Enum, "com.sun.star.uno.XInterface" , "b" )
tcInterface =uno.Enum( "com.sun.star.uno.TypeClass" , "INTERFACE" )
self.failUnlessRaises( RuntimeException, uno.Type, "a", tcInterface )
self.failUnlessRaises( RuntimeException, uno.Type, "com.sun.star.uno.Exception", tcInterface )
self.failUnlessRaises( (RuntimeException,exceptions.RuntimeError), uno.getTypeByName, "a" )
self.failUnlessRaises( (RuntimeException), uno.getConstantByName, "a" )
self.failUnlessRaises( (RuntimeException), uno.getConstantByName, "com.sun.star.uno.XInterface" )
def testByteSequence( self ):
s = uno.ByteSequence( b"ab" )
self.failUnless( s == uno.ByteSequence( b"ab" ) )
self.failUnless( uno.ByteSequence( b"abc" ) == s + uno.ByteSequence( b"c" ) )
self.failUnless( uno.ByteSequence( b"abc" ) == s + "c" )
self.failUnless( s + "c" == "abc" )
self.failUnless( s == uno.ByteSequence( s ) )
self.failUnless( s[0] == 'a' )
self.failUnless( s[1] == 'b' )
def testInvoke( self ):
self.failUnless( 5 == uno.invoke( self.tobj , "transportAny" , (uno.Any("byte", 5),) ) )
self.failUnless( 5 == uno.invoke(
PythonTransporter(), "transportAny" , (uno.Any( "byte", 5 ),) ) )
t = uno.getTypeByName( "long" )
mystruct = uno.createUnoStruct(
"com.sun.star.beans.PropertyValue", "foo",0,uno.Any(t,2),0 )
mystruct.Value = uno.Any(t, 1)
| gpl-3.0 |
irinabov/debian-qpid-python | qpid/tests/__init__.py | 3 | 1654 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class Test:
  """Base class for qpid tests: carries a name and, later, a configuration."""

  def __init__(self, name):
    # Identifier used by the harness when reporting results.
    self.name = name

  def configure(self, config):
    """Attach the harness configuration to this test instance."""
    self.config = config
# API Tests
import qpid.tests.framing
import qpid.tests.mimetype
import qpid.tests.messaging
# Legacy Tests
import qpid.tests.codec
import qpid.tests.queue
import qpid.tests.datatypes
import qpid.tests.connection
import qpid.tests.spec010
import qpid.tests.codec010
import qpid.tests.util
import qpid.tests.saslmech.finder
class TestTestsXXX(Test):
  # Sample "tests" exercising the harness's handling of captured stdout:
  # short output, very long lines, output with no trailing newline, and a
  # failing test with partial output.  These are fixtures for eyeballing the
  # runner, not real product tests.

  def testFoo(self):
    # One short line of output.
    print "this test has output"

  def testBar(self):
    # Several long lines, to check wrapping/truncation in the report.
    print "this test "*8
    print "has"*10
    print "a"*75
    print "lot of"*10
    print "output"*10

  def testQux(self):
    import sys
    # Deliberately omit the trailing newline.
    sys.stdout.write("this test has output with no newline")

  def testQuxFail(self):
    import sys
    sys.stdout.write("this test has output with no newline")
    # NOTE(review): `fdsa` is an undefined name; presumably it raises
    # NameError on purpose so the harness sees a *failing* test that also
    # produced partial output -- confirm before "fixing" it.
    fdsa
| apache-2.0 |
abhishek-ch/hue | desktop/core/ext-py/Paste-2.0.1/paste/util/intset.py | 50 | 19262 | # -*- coding: iso-8859-15 -*-
"""Immutable integer set type.
Integer set class.
Copyright (C) 2006, Heiko Wundram.
Released under the MIT license.
"""
import six
# Version information
# -------------------
__author__ = "Heiko Wundram <me@modelnine.org>"
__version__ = "0.2"
__revision__ = "6"
__date__ = "2006-01-20"
# Utility classes
# ---------------
class _Infinity(object):
    """Internal type used to represent infinity values."""

    __slots__ = ["_neg"]

    def __init__(self, neg):
        # True -> minus infinity, False -> plus infinity.
        self._neg = neg

    def __lt__(self, value):
        if not isinstance(value, _VALID_TYPES):
            return NotImplemented
        # Only -inf can be smaller than something, and never than -inf itself.
        if not self._neg:
            return False
        return not (isinstance(value, _Infinity) and value._neg)

    def __le__(self, value):
        if not isinstance(value, _VALID_TYPES):
            return NotImplemented
        # -inf is <= everything; +inf is <= nothing but itself (handled by eq).
        return self._neg

    def __gt__(self, value):
        if not isinstance(value, _VALID_TYPES):
            return NotImplemented
        # Only +inf can be greater than something, and never than +inf itself.
        if self._neg:
            return False
        return not (isinstance(value, _Infinity) and not value._neg)

    def __ge__(self, value):
        if not isinstance(value, _VALID_TYPES):
            return NotImplemented
        return not self._neg

    def __eq__(self, value):
        if not isinstance(value, _VALID_TYPES):
            return NotImplemented
        if isinstance(value, _Infinity):
            return self._neg == value._neg
        return False

    def __ne__(self, value):
        if not isinstance(value, _VALID_TYPES):
            return NotImplemented
        if isinstance(value, _Infinity):
            return self._neg != value._neg
        return True

    def __repr__(self):
        # Mirrors the "None" spelling IntSet accepts for unbounded ends.
        return "None"
# Types that may legally be compared against an _Infinity: all integer types
# (int/long under Python 2) plus _Infinity itself.
_VALID_TYPES = six.integer_types + (_Infinity,)

# Constants
# ---------

# Shared singletons used as the unbounded range endpoints throughout IntSet.
_MININF = _Infinity(True)
_MAXINF = _Infinity(False)
# Integer set class
# -----------------
class IntSet(object):
    """Integer set class with efficient storage in a RLE format of ranges.
    Supports minus and plus infinity in the range."""

    __slots__ = ["_ranges","_min","_max","_hash"]

    def __init__(self,*args,**kwargs):
        """Initialize an integer set. The constructor accepts an unlimited
        number of arguments that may either be tuples in the form of
        (start,stop) where either start or stop may be a number or None to
        represent maximum/minimum in that direction. The range specified by
        (start,stop) is always inclusive (differing from the builtin range
        operator).

        Keyword arguments that can be passed to an integer set are min and
        max, which specify the minimum and maximum number in the set,
        respectively. You can also pass None here to represent minus or plus
        infinity, which is also the default.
        """

        # Special case copy constructor.
        if len(args) == 1 and isinstance(args[0],IntSet):
            if kwargs:
                raise ValueError("No keyword arguments for copy constructor.")
            self._min = args[0]._min
            self._max = args[0]._max
            self._ranges = args[0]._ranges
            self._hash = args[0]._hash
            return

        # Initialize set.
        self._ranges = []

        # Process keyword arguments.
        self._min = kwargs.pop("min",_MININF)
        self._max = kwargs.pop("max",_MAXINF)
        if self._min is None:
            self._min = _MININF
        if self._max is None:
            self._max = _MAXINF

        # Check keyword arguments.
        if kwargs:
            raise ValueError("Invalid keyword argument.")
        if not ( isinstance(self._min, six.integer_types) or self._min is _MININF ):
            raise TypeError("Invalid type of min argument.")
        if not ( isinstance(self._max, six.integer_types) or self._max is _MAXINF ):
            raise TypeError("Invalid type of max argument.")
        if ( self._min is not _MININF and self._max is not _MAXINF and
             self._min > self._max ):
            raise ValueError("Minimum is not smaller than maximum.")
        # Internally ranges are stored half-open, so bump max by one.
        if isinstance(self._max, six.integer_types):
            self._max += 1

        # Process arguments.
        for arg in args:
            if isinstance(arg, six.integer_types):
                start, stop = arg, arg+1
            elif isinstance(arg,tuple):
                if len(arg) != 2:
                    raise ValueError("Invalid tuple, must be (start,stop).")

                # Process argument.
                start, stop = arg
                if start is None:
                    start = self._min
                if stop is None:
                    stop = self._max

                # Check arguments.
                if not ( isinstance(start, six.integer_types) or start is _MININF ):
                    raise TypeError("Invalid type of tuple start.")
                if not ( isinstance(stop, six.integer_types) or stop is _MAXINF ):
                    raise TypeError("Invalid type of tuple stop.")
                if ( start is not _MININF and stop is not _MAXINF and
                     start > stop ):
                    continue
                if isinstance(stop, six.integer_types):
                    stop += 1
            else:
                raise TypeError("Invalid argument.")

            # Clip the range to [min, max]; drop it if it falls outside.
            if start > self._max:
                continue
            elif start < self._min:
                start = self._min
            if stop < self._min:
                continue
            elif stop > self._max:
                stop = self._max
            self._ranges.append((start,stop))

        # Normalize set.
        self._normalize()

    # Utility functions for set operations
    # ------------------------------------

    def _iterranges(self,r1,r2,minval=_MININF,maxval=_MAXINF):
        # Merge-walk both range lists, yielding consecutive sub-ranges
        # together with flags that say whether each source covers them.
        curval = minval
        curstates = {"r1":False,"r2":False}
        imax, jmax = 2*len(r1), 2*len(r2)
        i, j = 0, 0
        while i < imax or j < jmax:
            if i < imax and ( ( j < jmax and
                                r1[i>>1][i&1] < r2[j>>1][j&1] ) or
                              j == jmax ):
                cur_r, newname, newstate = r1[i>>1][i&1], "r1", not (i&1)
                i += 1
            else:
                cur_r, newname, newstate = r2[j>>1][j&1], "r2", not (j&1)
                j += 1
            if curval < cur_r:
                if cur_r > maxval:
                    break
                yield curstates, (curval,cur_r)
                curval = cur_r
            curstates[newname] = newstate
        if curval < maxval:
            yield curstates, (curval,maxval)

    def _normalize(self):
        # Sort and merge overlapping/adjacent ranges, then freeze to a tuple
        # and cache the hash (equal sets must share one hash).
        self._ranges.sort()
        i = 1
        while i < len(self._ranges):
            if self._ranges[i][0] < self._ranges[i-1][1]:
                self._ranges[i-1] = (self._ranges[i-1][0],
                                     max(self._ranges[i-1][1],
                                         self._ranges[i][1]))
                del self._ranges[i]
            else:
                i += 1
        self._ranges = tuple(self._ranges)
        self._hash = hash(self._ranges)

    def __coerce__(self,other):
        # Also called explicitly by the generated set operations, so it must
        # keep working on Python 3 even though the implicit hook is py2-only.
        if isinstance(other,IntSet):
            return self, other
        elif isinstance(other, six.integer_types + (tuple,)):
            try:
                return self, self.__class__(other)
            except TypeError:
                # Catch a type error, in that case the structure specified by
                # other is something we can't coerce, return NotImplemented.
                # ValueErrors are not caught, they signal that the data was
                # invalid for the constructor. This is appropriate to signal
                # as a ValueError to the caller.
                return NotImplemented
        elif isinstance(other,list):
            try:
                return self, self.__class__(*other)
            except TypeError:
                # See above.
                return NotImplemented
        return NotImplemented

    # Set function definitions
    # ------------------------

    def _make_function(name,type,doc,pall,pany=None):
        """Makes a function to match two ranges. Accepts two types: either
        'set', which defines a function which returns a set with all ranges
        matching pall (pany is ignored), or 'bool', which returns True if pall
        matches for all ranges and pany matches for any one range. doc is the
        dostring to give this function. pany may be none to ignore the any
        match.

        The predicates get a dict with two keys, 'r1', 'r2', which denote
        whether the current range is present in range1 (self) and/or range2
        (other) or none of the two, respectively."""

        if type == "set":
            def f(self,other):
                coerced = self.__coerce__(other)
                if coerced is NotImplemented:
                    return NotImplemented
                other = coerced[1]
                newset = self.__class__.__new__(self.__class__)
                newset._min = min(self._min,other._min)
                newset._max = max(self._max,other._max)
                newset._ranges = []
                for states, (start,stop) in \
                        self._iterranges(self._ranges,other._ranges,
                                         newset._min,newset._max):
                    if pall(states):
                        if newset._ranges and newset._ranges[-1][1] == start:
                            newset._ranges[-1] = (newset._ranges[-1][0],stop)
                        else:
                            newset._ranges.append((start,stop))
                newset._ranges = tuple(newset._ranges)
                # BUG FIX: this used to hash self._ranges, giving the result
                # set a hash inconsistent with its own ranges (breaking the
                # __eq__/__hash__ invariant for derived sets).
                newset._hash = hash(newset._ranges)
                return newset
        elif type == "bool":
            def f(self,other):
                coerced = self.__coerce__(other)
                if coerced is NotImplemented:
                    return NotImplemented
                other = coerced[1]
                _min = min(self._min,other._min)
                _max = max(self._max,other._max)
                found = not pany
                for states, (start,stop) in \
                        self._iterranges(self._ranges,other._ranges,_min,_max):
                    if not pall(states):
                        return False
                    found = found or pany(states)
                return found
        else:
            raise ValueError("Invalid type of function to create.")
        # BUG FIX: the py2-only func_name/func_doc attributes did nothing
        # useful on Python 3; __name__/__doc__ are writable on both.
        f.__name__ = name
        f.__doc__ = doc
        return f

    # Intersection.
    __and__ = _make_function("__and__","set",
                             "Intersection of two sets as a new set.",
                             lambda s: s["r1"] and s["r2"])
    __rand__ = _make_function("__rand__","set",
                              "Intersection of two sets as a new set.",
                              lambda s: s["r1"] and s["r2"])
    intersection = _make_function("intersection","set",
                                  "Intersection of two sets as a new set.",
                                  lambda s: s["r1"] and s["r2"])

    # Union.
    __or__ = _make_function("__or__","set",
                            "Union of two sets as a new set.",
                            lambda s: s["r1"] or s["r2"])
    __ror__ = _make_function("__ror__","set",
                             "Union of two sets as a new set.",
                             lambda s: s["r1"] or s["r2"])
    union = _make_function("union","set",
                           "Union of two sets as a new set.",
                           lambda s: s["r1"] or s["r2"])

    # Difference.
    __sub__ = _make_function("__sub__","set",
                             "Difference of two sets as a new set.",
                             lambda s: s["r1"] and not s["r2"])
    __rsub__ = _make_function("__rsub__","set",
                              "Difference of two sets as a new set.",
                              lambda s: s["r2"] and not s["r1"])
    difference = _make_function("difference","set",
                                "Difference of two sets as a new set.",
                                lambda s: s["r1"] and not s["r2"])

    # Symmetric difference.
    __xor__ = _make_function("__xor__","set",
                             "Symmetric difference of two sets as a new set.",
                             lambda s: s["r1"] ^ s["r2"])
    __rxor__ = _make_function("__rxor__","set",
                              "Symmetric difference of two sets as a new set.",
                              lambda s: s["r1"] ^ s["r2"])
    symmetric_difference = _make_function("symmetric_difference","set",
                                          "Symmetric difference of two sets as a new set.",
                                          lambda s: s["r1"] ^ s["r2"])

    # Containership testing.
    __contains__ = _make_function("__contains__","bool",
                                  "Returns true if self is superset of other.",
                                  lambda s: s["r1"] or not s["r2"])
    issubset = _make_function("issubset","bool",
                              "Returns true if self is subset of other.",
                              lambda s: s["r2"] or not s["r1"])
    istruesubset = _make_function("istruesubset","bool",
                                  "Returns true if self is true subset of other.",
                                  lambda s: s["r2"] or not s["r1"],
                                  lambda s: s["r2"] and not s["r1"])
    issuperset = _make_function("issuperset","bool",
                                "Returns true if self is superset of other.",
                                lambda s: s["r1"] or not s["r2"])
    istruesuperset = _make_function("istruesuperset","bool",
                                    "Returns true if self is true superset of other.",
                                    lambda s: s["r1"] or not s["r2"],
                                    lambda s: s["r1"] and not s["r2"])
    overlaps = _make_function("overlaps","bool",
                              "Returns true if self overlaps with other.",
                              lambda s: True,
                              lambda s: s["r1"] and s["r2"])

    # Comparison.
    __eq__ = _make_function("__eq__","bool",
                            "Returns true if self is equal to other.",
                            lambda s: not ( s["r1"] ^ s["r2"] ))
    __ne__ = _make_function("__ne__","bool",
                            "Returns true if self is different to other.",
                            lambda s: True,
                            lambda s: s["r1"] ^ s["r2"])

    # Clean up namespace.
    del _make_function

    # Define other functions.
    def inverse(self):
        """Inverse of set as a new set."""
        newset = self.__class__.__new__(self.__class__)
        newset._min = self._min
        newset._max = self._max
        newset._ranges = []
        laststop = self._min
        for r in self._ranges:
            if laststop < r[0]:
                newset._ranges.append((laststop,r[0]))
            laststop = r[1]
        if laststop < self._max:
            newset._ranges.append((laststop,self._max))
        # BUG FIX: _ranges was left as a list and _hash was never assigned,
        # so hash(x.inverse()) raised AttributeError (__slots__) and the
        # result was inconsistent with sets built through other paths.
        newset._ranges = tuple(newset._ranges)
        newset._hash = hash(newset._ranges)
        return newset

    __invert__ = inverse

    # Hashing
    # -------

    def __hash__(self):
        """Returns a hash value representing this integer set. As the set is
        always stored normalized, the hash value is guaranteed to match for
        matching ranges."""
        return self._hash

    # Iterating
    # ---------

    def __len__(self):
        """Get length of this integer set. In case the length is larger than
        2**31 (including infinitely sized integer sets), it raises an
        OverflowError. This is due to len() restricting the size to
        0 <= len < 2**31."""
        if not self._ranges:
            return 0
        if self._ranges[0][0] is _MININF or self._ranges[-1][1] is _MAXINF:
            raise OverflowError("Infinitely sized integer set.")
        rlen = 0
        for r in self._ranges:
            rlen += r[1]-r[0]
        if rlen >= 2**31:
            raise OverflowError("Integer set bigger than 2**31.")
        return rlen

    def len(self):
        """Returns the length of this integer set as an integer. In case the
        length is infinite, returns -1. This function exists because of a
        limitation of the builtin len() function which expects values in
        the range 0 <= len < 2**31. Use this function in case your integer
        set might be larger."""
        if not self._ranges:
            return 0
        if self._ranges[0][0] is _MININF or self._ranges[-1][1] is _MAXINF:
            return -1
        rlen = 0
        for r in self._ranges:
            rlen += r[1]-r[0]
        return rlen

    def __nonzero__(self):
        """Returns true if this integer set contains at least one item."""
        return bool(self._ranges)

    # BUG FIX: Python 3 spells the truth-protocol hook __bool__; without this
    # alias every non-empty set was truthy only via __len__ (which raises for
    # unbounded sets).
    __bool__ = __nonzero__

    def __iter__(self):
        """Iterate over all values in this integer set. Iteration always starts
        by iterating from lowest to highest over the ranges that are bounded.
        After processing these, all ranges that are unbounded (maximum 2) are
        yielded intermixed."""
        ubranges = []
        for r in self._ranges:
            if r[0] is _MININF:
                if r[1] is _MAXINF:
                    ubranges.extend(([0,1],[-1,-1]))
                else:
                    ubranges.append([r[1]-1,-1])
            elif r[1] is _MAXINF:
                ubranges.append([r[0],1])
            else:
                # BUG FIX: this used the py2-only builtin xrange, a NameError
                # on Python 3 in an otherwise six-compatible module.
                for val in six.moves.range(r[0],r[1]):
                    yield val
        if ubranges:
            while True:
                for ubrange in ubranges:
                    yield ubrange[0]
                    ubrange[0] += ubrange[1]

    # Printing
    # --------

    def __repr__(self):
        """Return a representation of this integer set. The representation is
        executable to get an equal integer set."""
        rv = []
        for start, stop in self._ranges:
            if ( isinstance(start, six.integer_types) and isinstance(stop, six.integer_types)
                 and stop-start == 1 ):
                rv.append("%r" % start)
            elif isinstance(stop, six.integer_types):
                rv.append("(%r,%r)" % (start,stop-1))
            else:
                rv.append("(%r,%r)" % (start,stop))
        if self._min is not _MININF:
            rv.append("min=%r" % self._min)
        if self._max is not _MAXINF:
            rv.append("max=%r" % self._max)
        return "%s(%s)" % (self.__class__.__name__,",".join(rv))
if __name__ == "__main__":
    # Little smoke demo exercising set algebra, containment, iteration,
    # inversion, hashing and the two length accessors.
    a = IntSet((10, 20), 30)
    b = IntSet((10, 20))
    c = IntSet((10, 20), 30, (15, 19), min=0, max=40)
    print(a)
    print(a & 110)
    print(a | 110)
    print(a ^ (15, 25))
    print(a - 12)
    print(12 in a)
    print(a.issubset(a))
    print(b.issubset(a))
    print(a.istruesubset(a))
    print(b.istruesubset(a))
    for val in a:
        print(val)
    print(a.inverse())
    print(a == c)
    print(a == b)
    print(a != b)
    print(hash(a))
    print(hash(c))
    print(len(a))
    print(a.len())
| apache-2.0 |
googleads/google-ads-python | google/ads/googleads/v8/services/services/bidding_strategy_service/transports/base.py | 1 | 4183 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import bidding_strategy
from google.ads.googleads.v8.services.types import bidding_strategy_service
# Advertise the installed google-ads version in request metadata; fall back
# to an unversioned ClientInfo when the distribution metadata is missing.
try:
    _ads_version = pkg_resources.get_distribution("google-ads").version
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=_ads_version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class BiddingStrategyServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for BiddingStrategyService."""

    # OAuth scope required for every Google Ads API request.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)

    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        # NOTE(review): a bare IPv6 literal host would trip this check --
        # presumably never passed here; confirm if that ever becomes a case.
        if ":" not in host:
            host += ":443"
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials is None:
            credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precomputed wrapped methods
        # The dict is keyed by the RPC callables themselves, as returned by
        # the (subclass-overridden) properties below.
        self._wrapped_methods = {
            self.get_bidding_strategy: gapic_v1.method.wrap_method(
                self.get_bidding_strategy,
                default_timeout=None,
                client_info=client_info,
            ),
            self.mutate_bidding_strategies: gapic_v1.method.wrap_method(
                self.mutate_bidding_strategies,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    @property
    def get_bidding_strategy(
        self,
    ) -> typing.Callable[
        [bidding_strategy_service.GetBiddingStrategyRequest],
        bidding_strategy.BiddingStrategy,
    ]:
        # Concrete transports (e.g. gRPC) must override this with the actual
        # RPC callable.
        raise NotImplementedError

    @property
    def mutate_bidding_strategies(
        self,
    ) -> typing.Callable[
        [bidding_strategy_service.MutateBiddingStrategiesRequest],
        bidding_strategy_service.MutateBiddingStrategiesResponse,
    ]:
        # Concrete transports (e.g. gRPC) must override this with the actual
        # RPC callable.
        raise NotImplementedError


# Public API of this module.
__all__ = ("BiddingStrategyServiceTransport",)
| apache-2.0 |
assassinen/python_training | dump/fastMessageReaderOriginal.py | 1 | 1383 | #!/usr/bin/python
import sys
import re
# ============================================================================
class MessageReader:
    """Sequentially reads FIX-style messages from a log file.

    A message starts on a line beginning with ``ApplVerID`` and consists of
    ``name[tag]=value`` pairs.  An optional ``NoMDEntries[268]=N`` line is
    followed by N entry lines of the form ``<idx>: name[tag]=value ...``.
    """

    # One "name[tag]=value" pair; the value runs lazily up to the next pair
    # or the end of the line.  BUG FIX: the original pattern began with "s*"
    # (a literal run of the letter "s") instead of "\s*", which silently
    # truncated field names starting with "s" (e.g. "size" -> "ize").
    messageRegexp = r"\s*(\w+)\[\d+\]=(.*?)(?=\s\w+\[\d+\]|$)"

    def __init__(self, fileName):
        self.fileName = fileName
        self.file = open(fileName)
        # A line read past the end of the previous message, to be consumed
        # by the next getMessage() call.
        self.carryover = ""

    def __del__(self):
        # Guard: if open() raised in __init__, self.file never existed and a
        # plain self.file.close() would raise AttributeError at GC time.
        f = getattr(self, "file", None)
        if f is not None:
            f.close()

    def getMessage(self):
        """Return the next message as a dict with an 'entries' list, or {} at EOF."""
        if self.carryover != "":
            line = self.carryover
            self.carryover = ""
        else:
            line = self.file.readline()
        # Skip forward to the next message header line.
        while not line.startswith('ApplVerID'):
            if not line:
                return {}
            line = self.file.readline()
        message = dict(re.findall(self.messageRegexp, line))
        message['entries'] = []
        line = self.file.readline()
        noEntries = re.sub(r".*?NoMDEntries\[268\]\s*=\s*(\d+)[^\d]*", r'\1', line)
        if noEntries == line:
            # No entry-count field on this line: keep it for the next call.
            self.carryover = line
            return message
        for i in range(int(noEntries)):
            line = self.file.readline().split(':')[1].strip()
            entry = dict(re.findall(self.messageRegexp, line))
            message["entries"].append(entry)
        return message
# ============================================================================
| apache-2.0 |
liukaijv/XlsxWriter | xlsxwriter/test/comparison/test_header_image03.py | 8 | 3175 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
from ...compatibility import BytesIO
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    Exercises header images set by path, by &[Picture] placeholder, and by
    in-memory BytesIO data.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'header_image03.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.image_dir = test_dir + 'images/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}

    def _read_image_data(self, name):
        # Read an image into a BytesIO object. Fix: the original opened the
        # files manually without try/finally, leaking the handles if read()
        # raised; 'with' closes them deterministically.
        with open(self.image_dir + name, 'rb') as image_file:
            return BytesIO(image_file.read())

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.set_header('&L&G&C&G&R&G',
                             {'image_left': self.image_dir + 'red.jpg',
                              'image_center': self.image_dir + 'blue.jpg',
                              'image_right': self.image_dir + 'yellow.jpg'})

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_with_picture(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.set_header('&L&[Picture]&C&G&R&[Picture]',
                             {'image_left': self.image_dir + 'red.jpg',
                              'image_center': self.image_dir + 'blue.jpg',
                              'image_right': self.image_dir + 'yellow.jpg'})

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_from_bytesio(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.set_header('&L&G&C&G&R&G',
                             {'image_left': 'red.jpg',
                              'image_center': 'blue.jpg',
                              'image_right': 'yellow.jpg',
                              'image_data_left': self._read_image_data('red.jpg'),
                              'image_data_center': self._read_image_data('blue.jpg'),
                              'image_data_right': self._read_image_data('yellow.jpg'),
                              })

        workbook.close()

        self.assertExcelEqual()
| bsd-2-clause |
smashwilson/ansible-modules-core | cloud/digital_ocean/digital_ocean_domain.py | 9 | 7060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_domain
short_description: Create/delete a DNS record in DigitalOcean
description:
- Create/delete a DNS record in DigitalOcean.
version_added: "1.6"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the droplet id you want to operate on.
name:
description:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain.
ip:
description:
- The IP address to point a domain at.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Create a domain record
- digital_ocean_domain: >
state=present
name=my.digitalocean.domain
ip=127.0.0.1
# Create a droplet and a corresponding domain record
- digital_ocean: >
state=present
name=test_droplet
size_id=1
region_id=2
image_id=3
register: test_droplet
- digital_ocean_domain: >
state=present
name={{ test_droplet.droplet.name }}.my.domain
ip={{ test_droplet.droplet.ip_address }}
'''
import os
import time
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError as e:
HAS_DOPY = False
class TimeoutError(DoError):
    """DoError variant carrying the id of the resource that timed out.

    NOTE(review): this shadows the Python 3 builtin ``TimeoutError``; the
    name is kept for backward compatibility with existing callers.
    """

    def __init__(self, msg, id):
        # Keep DoError's message handling; 'id' identifies the event or
        # droplet the caller was waiting on (reported via fail_json in main).
        super(TimeoutError, self).__init__(msg)
        self.id = id
class JsonfyMixIn(object):
    """Mix-in exposing an object's attribute dictionary as its JSON form."""

    def to_json(self):
        # Every API-derived attribute lives in the instance dict; return it
        # directly as the serializable payload.
        return vars(self)
class DomainRecord(JsonfyMixIn):
    """A single DNS record inside a DigitalOcean domain.

    Attributes are adopted dynamically from the API's JSON response
    (observed uses: ``id``, ``domain_id``, ``record_type``, ``data``,
    ``name``).
    """

    # Shared DoManager client; injected by Domain.setup().
    manager = None

    def __init__(self, json):
        # Adopt every key of the API response as an instance attribute.
        self.__dict__.update(json)
    # Alias: refresh an existing record object from a new JSON payload.
    update_attr = __init__

    def update(self, data = None, record_type = None):
        """Edit this record via the API; omitted fields keep their current value."""
        json = self.manager.edit_domain_record(self.domain_id,
                                               self.id,
                                               record_type if record_type is not None else self.record_type,
                                               data if data is not None else self.data)
        self.__dict__.update(json)
        return self

    def destroy(self):
        """Delete this record via the API and return the raw API response."""
        json = self.manager.destroy_domain_record(self.domain_id, self.id)
        return json
class Domain(JsonfyMixIn):
    """A DigitalOcean domain; attributes come from the API JSON response."""

    # Shared DoManager client; injected by setup().
    manager = None

    def __init__(self, domain_json):
        # Adopt every key of the API response as an instance attribute.
        self.__dict__.update(domain_json)

    def destroy(self):
        """Delete this domain via the API."""
        self.manager.destroy_domain(self.id)

    def records(self):
        """Return this domain's DNS records as a list of DomainRecord objects."""
        json = self.manager.all_domain_records(self.id)
        # list() so callers get a reusable sequence on Python 3 as well,
        # where map() yields a one-shot iterator.
        return list(map(DomainRecord, json))

    @classmethod
    def add(cls, name, ip):
        """Create a new domain pointing `name` at `ip` and return it."""
        json = cls.manager.new_domain(name, ip)
        return cls(json)

    @classmethod
    def setup(cls, client_id, api_key):
        """Initialise the shared API manager for Domain and DomainRecord."""
        cls.manager = DoManager(client_id, api_key)
        DomainRecord.manager = cls.manager

    @classmethod
    def list_all(cls):
        """Return every domain on the account as a list of Domain objects."""
        domains = cls.manager.all_domains()
        # BUG FIX: this used to return map(...) directly; find() iterates the
        # result twice, and on Python 3 the second scan (by name) saw an
        # already-exhausted iterator and silently found nothing.
        return list(map(cls, domains))

    @classmethod
    def find(cls, name=None, id=None):
        """Look a domain up by id and/or name; return it, or False if absent."""
        if name is None and id is None:
            return False

        domains = Domain.list_all()

        if id is not None:
            for domain in domains:
                if domain.id == id:
                    return domain

        if name is not None:
            for domain in domains:
                if domain.name == name:
                    return domain

        return False
def core(module):
    """Apply the requested domain state; exits via module.exit_json/fail_json."""
    def getkeyordie(k):
        # Fetch a required module parameter or abort the run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        # FIX: 'except KeyError, e' was Python-2-only syntax and 'e.message'
        # does not exist on Python 3; e.args[0] is the missing variable name
        # on both.
        module.fail_json(msg='Unable to load %s' % e.args[0])

    state = module.params['state']

    Domain.setup(client_id, api_key)
    # FIX: `state in ('present')` was a substring test against the string
    # 'present' (true for 'p', 'res', ...), not tuple membership.
    if state == 'present':
        domain = Domain.find(id=module.params["id"])

        if not domain:
            domain = Domain.find(name=getkeyordie("name"))

        if not domain:
            domain = Domain.add(getkeyordie("name"),
                                getkeyordie("ip"))
            module.exit_json(changed=True, domain=domain.to_json())
        else:
            records = domain.records()
            at_record = None
            for record in records:
                if record.name == "@":
                    at_record = record

            # FIX: the update used the loop variable `record` (the *last*
            # record iterated) instead of the "@" record, and crashed with
            # AttributeError when no "@" record existed.
            if at_record is not None and not at_record.data == getkeyordie("ip"):
                at_record.update(data=getkeyordie("ip"), record_type='A')
                module.exit_json(changed=True, domain=Domain.find(id=at_record.domain_id).to_json())

            module.exit_json(changed=False, domain=domain.to_json())

    elif state == 'absent':
        domain = None
        if "id" in module.params:
            domain = Domain.find(id=module.params["id"])

        if not domain and "name" in module.params:
            domain = Domain.find(name=module.params["name"])

        if not domain:
            module.exit_json(changed=False, msg="Domain not found.")

        event_json = domain.destroy()
        module.exit_json(changed=True, event=event_json)
def main():
    """Ansible entry point: declare the argument spec and run core()."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['present', 'absent'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ip = dict(type='str'),
        ),
        # A domain must be addressable by either its numeric id or its name.
        required_one_of = (
            ['id', 'name'],
        ),
    )
    # dopy is imported at module load; fail cleanly if it is missing.
    if not HAS_DOPY:
        module.fail_json(msg='dopy required for this module')

    try:
        core(module)
    except TimeoutError as e:
        # Include the id of the resource we were waiting on.
        module.fail_json(msg=str(e), id=e.id)
    except (DoError, Exception) as e:
        module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
theo-l/django | tests/db_functions/math/test_mod.py | 69 | 1768 | import math
from decimal import Decimal
from django.db.models.functions import Mod
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class ModTests(TestCase):
    """Exercise the Mod() database function across numeric field types."""

    def test_null(self):
        # Mod against unset (NULL) columns must propagate NULL.
        IntegerModel.objects.create(big=100)
        annotated = IntegerModel.objects.annotate(
            null_mod_small=Mod('small', 'normal'),
            null_mod_normal=Mod('normal', 'big'),
        ).first()
        self.assertIsNone(annotated.null_mod_small)
        self.assertIsNone(annotated.null_mod_normal)

    def test_decimal(self):
        DecimalModel.objects.create(n1=Decimal('-9.9'), n2=Decimal('4.6'))
        annotated = DecimalModel.objects.annotate(n_mod=Mod('n1', 'n2')).first()
        self.assertIsInstance(annotated.n_mod, Decimal)
        self.assertAlmostEqual(annotated.n_mod, Decimal(math.fmod(annotated.n1, annotated.n2)))

    def test_float(self):
        FloatModel.objects.create(f1=-25, f2=0.33)
        annotated = FloatModel.objects.annotate(f_mod=Mod('f1', 'f2')).first()
        self.assertIsInstance(annotated.f_mod, float)
        self.assertAlmostEqual(annotated.f_mod, math.fmod(annotated.f1, annotated.f2))

    def test_integer(self):
        IntegerModel.objects.create(small=20, normal=15, big=1)
        annotated = IntegerModel.objects.annotate(
            small_mod=Mod('small', 'normal'),
            normal_mod=Mod('normal', 'big'),
            big_mod=Mod('big', 'small'),
        ).first()
        # Each integer Mod result comes back as a float equal to fmod().
        cases = (
            (annotated.small_mod, annotated.small, annotated.normal),
            (annotated.normal_mod, annotated.normal, annotated.big),
            (annotated.big_mod, annotated.big, annotated.small),
        )
        for result, dividend, divisor in cases:
            self.assertIsInstance(result, float)
            self.assertEqual(result, math.fmod(dividend, divisor))
| bsd-3-clause |
gmatteo/pymatgen | pymatgen/electronic_structure/tests/test_boltztrap.py | 5 | 15695 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
import warnings
from monty.os.path import which
from monty.serialization import loadfn
from pymatgen.electronic_structure.bandstructure import BandStructure
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer, BoltztrapRunner
from pymatgen.electronic_structure.core import OrbitalType, Spin
from pymatgen.util.testing import PymatgenTest
try:
from ase.io.cube import read_cube
except ImportError:
read_cube = None
try:
import fdint
except ImportError:
fdint = None
x_trans = which("x_trans")
@unittest.skipIf(not x_trans, "No x_trans.")
class BoltztrapAnalyzerTest(unittest.TestCase):
    """Regression tests for BoltztrapAnalyzer against pre-computed BoltzTraP
    output fixtures (transport, bands, spin-resolved DOS and Fermi runs).

    The numeric literals below are pinned reference values from those
    fixtures; they are not derived in the tests themselves.
    """

    @classmethod
    def setUpClass(cls):
        # Parsing BoltzTraP output is expensive; load each fixture once for
        # the whole test case.
        cls.bz = BoltztrapAnalyzer.from_files(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/transp/"))
        cls.bz_bands = BoltztrapAnalyzer.from_files(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/bands/"))
        cls.bz_up = BoltztrapAnalyzer.from_files(
            os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/dos_up/"), dos_spin=1
        )
        cls.bz_dw = BoltztrapAnalyzer.from_files(
            os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/dos_dw/"), dos_spin=-1
        )
        cls.bz_fermi = BoltztrapAnalyzer.from_files(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/fermi/"))

        with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "Cu2O_361_bandstructure.json"), "rt") as f:
            d = json.load(f)
            cls.bs = BandStructure.from_dict(d)
            cls.btr = BoltztrapRunner(cls.bs, 1)
        warnings.simplefilter("ignore")

    @classmethod
    def tearDownClass(cls):
        warnings.simplefilter("default")

    def test_properties(self):
        # Spot-check parsed transport tensors and scalars from the "transp"
        # fixture: conductivity, Seebeck, kappa, Hall, doping and DOS data.
        self.assertAlmostEqual(self.bz.gap, 1.6644932121620404, 4)
        array = self.bz._cond[300][102]
        self.assertAlmostEqual(array[0][0] / 1e19, 7.5756518, 4)
        self.assertAlmostEqual(array[0][2], -11.14679)
        self.assertAlmostEqual(array[1][0], -88.203286)
        self.assertAlmostEqual(array[2][2], 1.7133249e19)
        array = self.bz._seebeck[300][22]
        self.assertAlmostEqual(array[0][1], 6.4546074e-22)
        self.assertAlmostEqual(array[1][1], -0.00032073711)
        self.assertAlmostEqual(array[1][2], -2.9868424e-24)
        self.assertAlmostEqual(array[2][2], -0.0003126543)
        array = self.bz._kappa[500][300]
        self.assertAlmostEqual(array[0][1], 0.00014524309)
        self.assertAlmostEqual(array[1][1], 328834400000000.0)
        self.assertAlmostEqual(array[1][2], 3.7758069e-05)
        self.assertAlmostEqual(array[2][2], 193943750000000.0)
        self.assertAlmostEqual(self.bz._hall[400][800][1][0][0], 9.5623749e-28)
        self.assertAlmostEqual(self.bz._hall[400][68][1][2][2], 6.5106975e-10)
        self.assertAlmostEqual(self.bz.doping["p"][3], 1e18)
        self.assertAlmostEqual(self.bz.mu_doping["p"][300][2], 0.1553770018406)
        self.assertAlmostEqual(self.bz.mu_doping["n"][300][-1], 1.6486017632924719, 4)
        self.assertAlmostEqual(self.bz._cond_doping["n"][800][3][1][1] / 1e16, 1.5564085, 4)
        self.assertAlmostEqual(self.bz._seebeck_doping["p"][600][2][0][1] / 1e-23, 3.2860613, 4)
        self.assertAlmostEqual(self.bz._carrier_conc[500][67], 38.22832002)
        self.assertAlmostEqual(self.bz.vol, 612.97557323964838, 4)
        self.assertAlmostEqual(self.bz.intrans["scissor"], 0.0, 1)
        self.assertAlmostEqual(self.bz._hall_doping["n"][700][-1][2][2][2], 5.0136483e-26)
        self.assertAlmostEqual(self.bz.dos.efermi, -0.0300005507057)
        self.assertAlmostEqual(self.bz.dos.energies[0], -2.4497049391830448, 4)
        self.assertAlmostEqual(self.bz.dos.energies[345], -0.72708823447130944, 4)
        self.assertAlmostEqual(self.bz.dos.energies[-1], 3.7569398770153524, 4)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][400], 118.70171)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][200], 179.58562)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][300], 289.43945)
        self.assertAlmostEqual(self.bz_bands._bz_bands.shape, (1316, 20))
        self.assertAlmostEqual(self.bz_bands._bz_kpoints.shape, (1316, 3))
        self.assertAlmostEqual(self.bz_up._dos_partial["0"]["pz"][2562], 0.023862958)
        self.assertAlmostEqual(self.bz_dw._dos_partial["1"]["px"][3120], 5.0192891)
        self.assertAlmostEqual(self.bz_fermi.fermi_surface_data.shape, (121, 121, 65))
        self.assertAlmostEqual(self.bz_fermi.fermi_surface_data[21][79][19], -1.8831911809439161, 5)

    @unittest.skipIf(not fdint, "No FDINT")
    def test_get_seebeck_eff_mass(self):
        ref = [1.956090529381193, 2.0339311618566343, 1.1529383757896965]
        ref2 = [4258.4072823354145, 4597.0351887125289, 4238.1262696392705]
        sbk_mass_tens_mu = self.bz.get_seebeck_eff_mass(output="tensor", doping_levels=False, temp=300)[3]
        sbk_mass_tens_dop = self.bz.get_seebeck_eff_mass(output="tensor", doping_levels=True, temp=300)["n"][2]
        sbk_mass_avg_mu = self.bz.get_seebeck_eff_mass(output="average", doping_levels=False, temp=300)[3]
        sbk_mass_avg_dop = self.bz.get_seebeck_eff_mass(output="average", doping_levels=True, temp=300)["n"][2]
        for i in range(0, 3):
            self.assertAlmostEqual(sbk_mass_tens_mu[i], ref2[i], 1)
            self.assertAlmostEqual(sbk_mass_tens_dop[i], ref[i], 4)
        self.assertAlmostEqual(sbk_mass_avg_mu, 4361.4744008038842, 1)
        self.assertAlmostEqual(sbk_mass_avg_dop, 1.661553842105382, 4)

    @unittest.skipIf(not fdint, "No FDINT")
    def test_get_complexity_factor(self):
        ref = [2.7658776815227828, 2.9826088215568403, 0.28881335881640308]
        ref2 = [0.0112022048620205, 0.0036001049607186602, 0.0083028947173193028]
        sbk_mass_tens_mu = self.bz.get_complexity_factor(output="tensor", doping_levels=False, temp=300)[3]
        sbk_mass_tens_dop = self.bz.get_complexity_factor(output="tensor", doping_levels=True, temp=300)["n"][2]
        sbk_mass_avg_mu = self.bz.get_complexity_factor(output="average", doping_levels=False, temp=300)[3]
        sbk_mass_avg_dop = self.bz.get_complexity_factor(output="average", doping_levels=True, temp=300)["n"][2]
        for i in range(0, 3):
            self.assertAlmostEqual(sbk_mass_tens_mu[i], ref2[i], 4)
            self.assertAlmostEqual(sbk_mass_tens_dop[i], ref[i], 4)
        self.assertAlmostEqual(sbk_mass_avg_mu, 0.00628677029221, 4)
        self.assertAlmostEqual(sbk_mass_avg_dop, 1.12322832119, 4)

    def test_get_seebeck(self):
        ref = [-768.99078999999995, -724.43919999999991, -686.84682999999973]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_seebeck()["n"][800][3][i], ref[i])
        self.assertAlmostEqual(self.bz.get_seebeck(output="average")["p"][800][3], 697.608936667)
        self.assertAlmostEqual(
            self.bz.get_seebeck(output="average", doping_levels=False)[500][520],
            1266.7056,
        )
        self.assertAlmostEqual(
            self.bz.get_seebeck(output="average", doping_levels=False)[300][65],
            -36.2459389333,
        )  # TODO: this was originally "eigs"

    def test_get_conductivity(self):
        ref = [5.9043185000000022, 17.855599000000002, 26.462935000000002]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_conductivity()["p"][600][2][i], ref[i])
        self.assertAlmostEqual(self.bz.get_conductivity(output="average")["n"][700][1], 1.58736609667)
        self.assertAlmostEqual(
            self.bz.get_conductivity(output="average", doping_levels=False)[300][457],
            2.87163566667,
        )
        self.assertAlmostEqual(
            self.bz.get_conductivity(
                output="average",
                doping_levels=False,
                # TODO: this was originally "eigs"
                relaxation_time=1e-15,
            )[200][63],
            16573.0536667,
        )

    def test_get_power_factor(self):
        ref = [6.2736602345523362, 17.900184232304138, 26.158282220458144]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_power_factor()["p"][200][2][i], ref[i])
        self.assertAlmostEqual(self.bz.get_power_factor(output="average")["n"][600][4], 411.230962976)
        self.assertAlmostEqual(
            self.bz.get_power_factor(output="average", doping_levels=False, relaxation_time=1e-15)[500][459],
            6.59277148467,
        )
        self.assertAlmostEqual(
            self.bz.get_power_factor(output="average", doping_levels=False)[800][61],
            2022.67064134,
        )  # TODO: this was originally "eigs"

    def test_get_thermal_conductivity(self):
        ref = [2.7719565628862623e-05, 0.00010048046886793946, 0.00015874549392499391]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_thermal_conductivity()["p"][300][2][i], ref[i])
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output="average", relaxation_time=1e-15)["n"][500][0],
            1.74466575612e-07,
        )
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output="average", doping_levels=False)[800][874],
            8.08066254813,
        )
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output="average", doping_levels=False)[200][32],
            # TODO: this was originally "eigs"
            0.0738961845832,
        )
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(k_el=False, output="average", doping_levels=False)[200][32],
            0.19429052,
        )

    def test_get_zt(self):
        ref = [0.097408810215, 0.29335112354, 0.614673998089]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_zt()["n"][800][4][i], ref[i])
        self.assertAlmostEqual(self.bz.get_zt(output="average", kl=0.5)["p"][700][2], 0.0170001879916)
        self.assertAlmostEqual(
            self.bz.get_zt(output="average", doping_levels=False, relaxation_time=1e-15)[300][240],
            0.0041923533238348342,
        )

        eigs = self.bz.get_zt(output="eigs", doping_levels=False)[700][65]
        ref_eigs = [0.082420053399668847, 0.29408035502671648, 0.40822061215079392]
        for idx, val in enumerate(ref_eigs):
            self.assertAlmostEqual(eigs[idx], val, 5)

    def test_get_average_eff_mass(self):
        ref = [0.76045816788363574, 0.96181142990667101, 2.9428428773308628]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_average_eff_mass()["p"][300][2][i], ref[i])
        ref = [1.1295783824744523, 1.3898454041924351, 5.2459984671977935]
        ref2 = [6.6648842712692078, 31.492540105738343, 37.986369302138954]
        for i in range(0, 3):
            self.assertAlmostEqual(self.bz.get_average_eff_mass()["n"][600][1][i], ref[i])
            self.assertAlmostEqual(self.bz.get_average_eff_mass(doping_levels=False)[300][200][i], ref2[i])
        ref = [
            [9.61811430e-01, -8.25159596e-19, -4.70319444e-19],
            [-8.25159596e-19, 2.94284288e00, 3.00368916e-18],
            [-4.70319444e-19, 3.00368916e-18, 7.60458168e-01],
        ]
        ref2 = [
            [27.97604444269153, -2.39347589e-17, -1.36897140e-17],
            [-2.39347589e-17, 8.55969097e01, 8.74169648e-17],
            [-1.36897140e-17, 8.74169648e-17, 2.21151980e01],
        ]
        for i in range(0, 3):
            for j in range(0, 3):
                self.assertAlmostEqual(
                    self.bz.get_average_eff_mass(output="tensor")["p"][300][2][i][j],
                    ref[i][j],
                    4,
                )
                self.assertAlmostEqual(
                    self.bz.get_average_eff_mass(output="tensor", doping_levels=False)[300][500][i][j],
                    ref2[i][j],
                    4,
                )
        self.assertAlmostEqual(
            self.bz.get_average_eff_mass(output="average")["n"][300][2],
            1.53769093989,
            4,
        )

    def test_get_carrier_concentration(self):
        self.assertAlmostEqual(self.bz.get_carrier_concentration()[300][39] / 1e22, 6.4805156617179151, 4)
        self.assertAlmostEqual(self.bz.get_carrier_concentration()[300][693] / 1e15, -6.590800965604750, 4)

    def test_get_hall_carrier_concentration(self):
        self.assertAlmostEqual(
            self.bz.get_hall_carrier_concentration()[600][120] / 1e21,
            6.773394626767555,
            4,
        )
        self.assertAlmostEqual(
            self.bz.get_hall_carrier_concentration()[500][892] / 1e21,
            -9.136803845741777,
            4,
        )

    def test_get_symm_bands(self):
        # Check the reconstructed symmetry-line band structure with no
        # explicit k-point line, with Kpoint objects, and with raw coords.
        structure = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/structure_mp-12103.json"))
        sbs = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/dft_bs_sym_line.json"))
        kpoints = [kp.frac_coords for kp in sbs.kpoints]
        labels_dict = {k: sbs.labels_dict[k].frac_coords for k in sbs.labels_dict}
        for kpt_line, labels_dict in zip([None, sbs.kpoints, kpoints], [None, sbs.labels_dict, labels_dict]):
            sbs_bzt = self.bz_bands.get_symm_bands(structure, -5.25204548, kpt_line=kpt_line, labels_dict=labels_dict)
            self.assertAlmostEqual(len(sbs_bzt.bands[Spin.up]), 20)
            self.assertAlmostEqual(len(sbs_bzt.bands[Spin.up][1]), 143)

    # def test_check_acc_bzt_bands(self):
    #     structure = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR,'boltztrap/structure_mp-12103.json'))
    #     sbs = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR,'boltztrap/dft_bs_sym_line.json'))
    #     sbs_bzt = self.bz_bands.get_symm_bands(structure,-5.25204548)
    #     corr,werr_vbm,werr_cbm,warn = BoltztrapAnalyzer.check_acc_bzt_bands(sbs_bzt,sbs)
    #     self.assertAlmostEqual(corr[2],9.16851750e-05)
    #     self.assertAlmostEqual(werr_vbm['K-H'],0.18260273521047862)
    #     self.assertAlmostEqual(werr_cbm['M-K'],0.071552669981356981)
    #     self.assertFalse(warn)

    def test_get_complete_dos(self):
        # Merge the spin-up and spin-down analyzers into one CompleteDos.
        structure = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/structure_mp-12103.json"))
        cdos = self.bz_up.get_complete_dos(structure, self.bz_dw)
        spins = list(cdos.densities.keys())
        self.assertIn(Spin.down, spins)
        self.assertIn(Spin.up, spins)
        self.assertAlmostEqual(
            cdos.get_spd_dos()[OrbitalType.p].densities[Spin.up][3134],
            43.839230100999991,
        )
        self.assertAlmostEqual(
            cdos.get_spd_dos()[OrbitalType.s].densities[Spin.down][716],
            6.5383268000000001,
        )

    def test_extreme(self):
        x = self.bz.get_extreme("seebeck")
        self.assertEqual(x["best"]["carrier_type"], "n")
        self.assertAlmostEqual(x["p"]["value"], 1255.365, 2)
        self.assertEqual(x["n"]["isotropic"], True)
        self.assertEqual(x["n"]["temperature"], 600)

        x = self.bz.get_extreme("kappa", maximize=False, min_temp=400, min_doping=1e20)
        self.assertAlmostEqual(x["best"]["value"], 0.105, 2)
        self.assertAlmostEqual(x["n"]["value"], 0.139, 2)
        self.assertEqual(x["p"]["temperature"], 400)
        self.assertEqual(x["n"]["isotropic"], False)

    def test_to_from_dict(self):
        # The runner must round-trip through as_dict()/JSON serialization.
        btr_dict = self.btr.as_dict()
        s = json.dumps(btr_dict)
        self.assertIsNotNone(s)
        self.assertIsNotNone(btr_dict["bs"])
if __name__ == "__main__":
unittest.main()
| mit |
weidongxu84/info-gatherer | django/template/loaders/eggs.py | 103 | 1038 | # Wrapper for loading templates from eggs via pkg_resources.resource_string.
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.conf import settings
class Loader(BaseLoader):
    """Template loader that reads templates out of installed Python eggs."""

    # Only usable when setuptools' pkg_resources could be imported.
    is_usable = resource_string is not None

    def load_template_source(self, template_name, template_dirs=None):
        """
        Loads templates from Python eggs via pkg_resource.resource_string.

        For every installed app, it tries to get the resource (app, template_name).
        """
        if resource_string is not None:
            pkg_name = 'templates/' + template_name
            for app in settings.INSTALLED_APPS:
                try:
                    return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name))
                except Exception:
                    # FIX: this was a bare 'except:', which also swallowed
                    # SystemExit and KeyboardInterrupt. Keep the deliberate
                    # best-effort fallthrough to the next app, but only for
                    # ordinary errors (missing resource, bad app, decode
                    # failure).
                    pass
        raise TemplateDoesNotExist(template_name)
_loader = Loader()
| mit |
xinwu/horizon | openstack_dashboard/dashboards/project/access_and_security/keypairs/forms.py | 20 | 3170 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
# Collapses CR/LF characters pasted into the public-key textarea.
NEW_LINES = re.compile(r"\r|\n")

# Key pair names may contain unicode word characters, hyphens and spaces.
KEYPAIR_NAME_REGEX = re.compile(r"^[\w\- ]+$", re.UNICODE)
KEYPAIR_ERROR_MESSAGES = {
    'invalid': _('Key pair name may only contain letters, '
                 'numbers, underscores, spaces and hyphens.')}
class CreateKeypair(forms.SelfHandlingForm):
    """Form for creating a new key pair; the key material is generated
    server-side and fetched by the download view."""

    name = forms.RegexField(max_length=255,
                            label=_("Key Pair Name"),
                            regex=KEYPAIR_NAME_REGEX,
                            error_messages=KEYPAIR_ERROR_MESSAGES)

    def handle(self, request, data):
        return True  # We just redirect to the download view.

    def clean(self):
        """Reject names that collide with an existing key pair."""
        cleaned_data = super(CreateKeypair, self).clean()
        name = cleaned_data.get('name')
        try:
            existing = api.nova.keypair_list(self.request)
        except Exception:
            # Listing failed: report it but let the create attempt proceed.
            exceptions.handle(self.request, ignore=True)
            existing = []
        if any(keypair.name == name for keypair in existing):
            raise ValidationError(_('The name is already in use.'))
        return cleaned_data
class ImportKeypair(forms.SelfHandlingForm):
    """Form for importing an existing public key as a new key pair."""

    name = forms.RegexField(max_length=255,
                            label=_("Key Pair Name"),
                            regex=KEYPAIR_NAME_REGEX,
                            error_messages=KEYPAIR_ERROR_MESSAGES)
    public_key = forms.CharField(label=_("Public Key"), widget=forms.Textarea(
        attrs={'class': 'modal-body-fixed-width'}))

    def handle(self, request, data):
        """Import the submitted key via Nova; return the keypair or False."""
        try:
            key_name = data['name']
            # Remove any new lines in the public key
            data['public_key'] = NEW_LINES.sub("", data['public_key'])
            keypair = api.nova.keypair_import(request,
                                              key_name,
                                              data['public_key'])
            messages.success(request,
                             _('Successfully imported public key: %s')
                             % key_name)
            return keypair
        except Exception:
            exceptions.handle(request, ignore=True)
            self.api_error(_('Unable to import key pair.'))
            return False
| apache-2.0 |
olapaola/olapaola-android-scripting | python/src/Lib/markupbase.py | 243 | 14350 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the HTMLParser and sgmllib
modules (indirectly, for htmllib as well). It has no documented
public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers.

    Subclasses must provide a ``rawdata`` string attribute (the input
    buffer), and the handler methods used below (``handle_decl``,
    ``handle_comment``, ``unknown_decl``) as well as ``error()``.
    The internal parse methods return the index just past the parsed
    construct, or -1 when the buffer ends mid-construct ("incomplete")
    so the caller can retry once more data has been fed in.
    """
    def __init__(self):
        # Abstract base: refuse direct instantiation.
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "markupbase.ParserBase must be subclassed")
    def error(self, message):
        # Subclass responsibility: report a parse error (expected to raise).
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")
    def reset(self):
        # Reset the source-position bookkeeping used by getpos()/updatepos().
        self.lineno = 1
        self.offset = 0
    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset
    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
    def updatepos(self, i, j):
        # Advance (lineno, offset) across rawdata[i:j]; returns j
        # unchanged for caller convenience.
        if i >= j:
            return j
        rawdata = self.rawdata
        nlines = rawdata.count("\n", i, j)
        if nlines:
            self.lineno = self.lineno + nlines
            pos = rawdata.rindex("\n", i, j) # Should not fail
            self.offset = j-(pos+1)
        else:
            self.offset = self.offset + j-i
        return j
    # Extra characters a subclass allows inside a declaration body
    # (consulted in parse_declaration below).
    _decl_otherchars = ''
    # Internal -- parse declaration (for use by subclasses).
    # Returns the index past the declaration, or -1 if incomplete.
    def parse_declaration(self, i):
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        # ATTLIST, NOTATION, SHORTREF, USEMAP,
        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--': #comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[': #marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else: #all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            # DOCTYPE bodies allow no extra punctuation characters.
            self._decl_otherchars = ''
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1 # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in ("attlist", "linktype", "link", "element"):
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1 # incomplete
    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    def parse_marked_section(self, i, report=1):
        # Returns the index past the marked section, or -1 if incomplete.
        # With report=0 the section is scanned but unknown_decl() is not
        # invoked.
        rawdata= self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name( i+3, i )
        if j < 0:
            return j
        if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
            # look for standard ]]> ending
            match= _markedsectionclose.search(rawdata, i+3)
        elif sectName in ("if", "else", "endif"):
            # look for MS Office ]> ending
            match= _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)
    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i, report=1):
        # With report=0 the comment is scanned but handle_comment() is
        # not invoked (used when skipping comments inside a subset).
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            self.error('unexpected call to parse_comment()')
        match = _commentclose.search(rawdata, i+4)
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.handle_comment(rawdata[i+4: j])
        return match.end(0)
    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        # declstartpos is the position of the enclosing "<!", used only
        # for error reporting via updatepos().
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in ("attlist", "element", "entity", "notation"):
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                # end of subset; skip whitespace, then require ">".
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1
    # Internal -- scan past <!ELEMENT declarations
    def _parse_doctype_element(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j == -1:
            return -1
        # style content model; just skip until '>'
        rawdata = self.rawdata
        if '>' in rawdata[j:]:
            return rawdata.find(">", j) + 1
        return -1
    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            return -1
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            # name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                # optional quoted default value
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                # "#REQUIRED" / "#IMPLIED" / "#FIXED" style constraint
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1
    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j
    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            # parameter entity declaration: skip whitespace after "%"
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1 # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j
    # Internal -- scan a name token and the new position and the token, or
    # return -1 if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        # Returns (lowercased name, end index) on success, or
        # (None, -1) when the name may continue past the buffer end.
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])
    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        pass
| apache-2.0 |
dbrnz/PythonQtCopy | examples/NicePyConsole/pygments/lexers/webmisc.py | 3 | 36115 | # -*- coding: utf-8 -*-
"""
pygments.lexers.webmisc
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for misc. web stuff.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
from pygments.util import unirange
from pygments.lexers.css import _indentation, _starts_block
from pygments.lexers.html import HtmlLexer
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.ruby import RubyLexer
__all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer']
class DuelLexer(RegexLexer):
    """
    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
    See http://duelengine.org/.
    See http://jsonml.org/jbst/.
    .. versionadded:: 1.4
    """
    name = 'Duel'
    aliases = ['duel', 'jbst', 'jsonml+bst']
    filenames = ['*.duel', '*.jbst']
    mimetypes = ['text/x-duel', 'text/x-jbst']
    # DOTALL so ".*?" spans are allowed to cross line boundaries.
    flags = re.DOTALL
    tokens = {
        'root': [
            # <% ... %> code blocks (also <%@ %>, <%= %>, <%# %>, <%! %>,
            # <%: %>); the body is delegated to the JavaScript lexer.
            (r'(<%[@=#!:]?)(.*?)(%>)',
             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
            # <%$ prefix : value %> extension/resource expressions.
            (r'(<%\$)(.*?)(:)(.*?)(%>)',
             bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
            # <%-- ... --%> server-side comments.
            (r'(<%--)(.*?)(--%>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            # Inline <script> elements: tags as HTML, body as JavaScript.
            (r'(<script.*?>)(.*?)(</script>)',
             bygroups(using(HtmlLexer),
                      using(JavascriptLexer), using(HtmlLexer))),
            # Everything up to the next "<" (and any trailing remainder)
            # is plain HTML.
            (r'(.+?)(?=<)', using(HtmlLexer)),
            (r'.+', using(HtmlLexer)),
        ],
    }
class XQueryLexer(ExtendedRegexLexer):
    """
    An XQuery lexer, parsing a stream and outputting the tokens needed to
    highlight xquery code.
    .. versionadded:: 1.4
    """
    name = 'XQuery'
    aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
    filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
    mimetypes = ['text/xquery', 'application/xquery']
    # Auxiliary stack of lexer-state names, maintained by the callbacks
    # below.  It remembers which state to re-enter once a nested
    # construct (direct constructor, enclosed expression, comment, ...)
    # is closed, since XQuery nesting exceeds what the plain pygments
    # state stack can express.
    xquery_parse_state = []
    # FIX UNICODE LATER
    # ncnamestartchar = (
    #     ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
    #     ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
    #     ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
    #     ur"[\u10000-\uEFFFF]"
    # )
    # ASCII-only approximations of the XML NCName productions.
    ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
    # FIX UNICODE LATER
    # ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
    #                                 ur"[\u203F-\u2040]")
    ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
    ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
    # Processing-instruction targets exclude the letters that could
    # spell "xml" (case-insensitively).
    pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])"
    pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
    pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
    prefixedname = "%s:%s" % (ncname, ncname)
    unprefixedname = ncname
    qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
    # Predefined entity and character references.
    entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
    charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
    # String literals; quotes are escaped by doubling ("" / '').
    stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
    stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
    # FIX UNICODE LATER
    # elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    #                       ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
    # quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
    #                        ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
    # aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    #                        ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]'
    # CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
    # aposattrcontentchar
    # x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
    flags = re.DOTALL | re.MULTILINE | re.UNICODE
    # ------------------------------------------------------------------
    # ExtendedRegexLexer callbacks.  Each one yields token tuples and
    # then manipulates ctx.stack / xquery_parse_state by hand; ctx.pos
    # must always be advanced to match.end() (or a sub-group end).
    # ------------------------------------------------------------------
    def punctuation_root_callback(lexer, match, ctx):
        """Emit group 1 as Punctuation and jump straight to 'root'."""
        yield match.start(), Punctuation, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()
    def operator_root_callback(lexer, match, ctx):
        """Emit group 1 as Operator and jump straight to 'root'."""
        yield match.start(), Operator, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()
    def popstate_tag_callback(lexer, match, ctx):
        """Close a tag: emit Name.Tag and resume the remembered state."""
        yield match.start(), Name.Tag, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()
    def popstate_xmlcomment_callback(lexer, match, ctx):
        """Close an XML comment and resume the remembered state."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()
    def popstate_kindtest_callback(lexer, match, ctx):
        """Close a kindtest ')'; handle a trailing occurrence indicator."""
        yield match.start(), Punctuation, match.group(1)
        next_state = lexer.xquery_parse_state.pop()
        if next_state == 'occurrenceindicator':
            if re.match("[?*+]+", match.group(2)):
                yield match.start(), Punctuation, match.group(2)
                ctx.stack.append('operator')
                ctx.pos = match.end()
            else:
                ctx.stack.append('operator')
                ctx.pos = match.end(1)
        else:
            ctx.stack.append(next_state)
            ctx.pos = match.end(1)
    def popstate_callback(lexer, match, ctx):
        """Close a '}' and resume whichever state stack has one to give."""
        yield match.start(), Punctuation, match.group(1)
        # if we have run out of our state stack, pop whatever is on the pygments
        # state stack
        if len(lexer.xquery_parse_state) == 0:
            ctx.stack.pop()
        elif len(ctx.stack) > 1:
            ctx.stack.append(lexer.xquery_parse_state.pop())
        else:
            # i don't know if i'll need this, but in case, default back to root
            ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_element_content_starttag_callback(lexer, match, ctx):
        """Open a start tag inside element content."""
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('element_content')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_cdata_section_callback(lexer, match, ctx):
        """Enter a CDATA section, remembering the current state."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.pos = match.end()
    def pushstate_starttag_callback(lexer, match, ctx):
        """Enter a start tag, remembering the current state."""
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_operator_order_callback(lexer, match, ctx):
        """'ordered {' / 'unordered {': lex the body from 'root'."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_root_validate(lexer, match, ctx):
        """'validate {': lex the enclosed expression from 'root'."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_root_validate_withmode(lexer, match, ctx):
        """'validate lax|strict': keyword pair, then back to 'root'."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Keyword, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
        """Enter '<?...?>' from an operator position."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
        """Enter '<?...?>' from inside element content."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
        """Enter a CDATA section from inside element content."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_operator_cdata_section_callback(lexer, match, ctx):
        """Enter a CDATA section from an operator position."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
        """Enter '<!--...-->' from inside element content."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
        """Enter '<!--...-->' from an operator position."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_kindtest_callback(lexer, match, ctx):
        """Nested kindtest '(' inside another kindtest."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('kindtest')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
        """'processing-instruction(': PI kindtest, resume as operator."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtestforpi')
        ctx.pos = match.end()
    def pushstate_operator_kindtest_callback(lexer, match, ctx):
        """Node kindtest '(': resume as operator afterwards."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
        """Kindtest in a sequence type: may be followed by ?, *, +."""
        yield match.start(), Name.Tag, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('occurrenceindicator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_operator_starttag_callback(lexer, match, ctx):
        """Open a direct-constructor start tag from an operator position."""
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_operator_root_callback(lexer, match, ctx):
        """'{': lex the enclosed expression from 'root', resume as operator."""
        yield match.start(), Punctuation, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_operator_root_construct_callback(lexer, match, ctx):
        """Computed constructor keyword plus '{'."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_root_callback(lexer, match, ctx):
        """'{': remember the current state and restart from 'root'."""
        yield match.start(), Punctuation, match.group(1)
        cur_state = ctx.stack.pop()
        lexer.xquery_parse_state.append(cur_state)
        ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_operator_attribute_callback(lexer, match, ctx):
        """Attribute name: continue in the 'operator' state."""
        yield match.start(), Name.Attribute, match.group(1)
        ctx.stack.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_callback(lexer, match, ctx):
        """Keyword + '(' or '{' sequence; resume as operator later."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    tokens = {
        'comment': [
            # xquery comments
            (r'(:\))', Comment, '#pop'),
            (r'(\(:)', Comment, '#push'),
            (r'[^:)]', Comment),
            (r'([^:)]|:|\))', Comment),
        ],
        'whitespace': [
            (r'\s+', Text),
        ],
        'operator': [
            include('whitespace'),
            (r'(\})', popstate_callback),
            (r'\(:', Comment, 'comment'),
            (r'(\{)', pushstate_root_callback),
            (r'then|else|external|at|div|except', Keyword, 'root'),
            (r'order by', Keyword, 'root'),
            (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
            (r'and|or', Operator.Word, 'root'),
            (r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
             Operator.Word, 'root'),
            (r'return|satisfies|to|union|where|preserve\s+strip',
             Keyword, 'root'),
            (r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=)',
             operator_root_callback),
            (r'(::|;|\[|//|/|,)',
             punctuation_root_callback),
            (r'(castable|cast)(\s+)(as)\b',
             bygroups(Keyword, Text, Keyword), 'singletype'),
            (r'(instance)(\s+)(of)\b',
             bygroups(Keyword, Text, Keyword), 'itemtype'),
            (r'(treat)(\s+)(as)\b',
             bygroups(Keyword, Text, Keyword), 'itemtype'),
            (r'(case|as)\b', Keyword, 'itemtype'),
            (r'(\))(\s*)(as)',
             bygroups(Punctuation, Text, Keyword), 'itemtype'),
            (r'\$', Name.Variable, 'varname'),
            (r'(for|let)(\s+)(\$)',
             bygroups(Keyword, Text, Name.Variable), 'varname'),
            # (r'\)|\?|\]', Punctuation, '#push'),
            (r'\)|\?|\]', Punctuation),
            (r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
            (r'ascending|descending|default', Keyword, '#push'),
            (r'external', Keyword),
            (r'collation', Keyword, 'uritooperator'),
            # finally catch all string literals and stay in operator state
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
        ],
        'uritooperator': [
            (stringdouble, String.Double, '#pop'),
            (stringsingle, String.Single, '#pop'),
        ],
        'namespacedecl': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
            (r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r',', Punctuation),
            (r'=', Operator),
            (r';', Punctuation, 'root'),
            (ncname, Name.Namespace),
        ],
        'namespacekeyword': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (stringdouble, String.Double, 'namespacedecl'),
            (stringsingle, String.Single, 'namespacedecl'),
            (r'inherit|no-inherit', Keyword, 'root'),
            (r'namespace', Keyword, 'namespacedecl'),
            (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
            (r'preserve|no-preserve', Keyword),
            (r',', Punctuation),
        ],
        'varname': [
            (r'\(:', Comment, 'comment'),
            (qname, Name.Variable, 'operator'),
        ],
        'singletype': [
            (r'\(:', Comment, 'comment'),
            (ncname + r'(:\*)', Name.Variable, 'operator'),
            (qname, Name.Variable, 'operator'),
        ],
        'itemtype': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'\$', Punctuation, 'varname'),
            (r'(void)(\s*)(\()(\s*)(\))',
             bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
            (r'(element|attribute|schema-element|schema-attribute|comment|text|'
             r'node|binary|document-node|empty-sequence)(\s*)(\()',
             pushstate_occurrenceindicator_kindtest_callback),
            # Marklogic specific type?
            (r'(processing-instruction)(\s*)(\()',
             bygroups(Keyword, Text, Punctuation),
             ('occurrenceindicator', 'kindtestforpi')),
            (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
             bygroups(Keyword, Text, Punctuation, Text, Punctuation),
             'occurrenceindicator'),
            (r'\(\#', Punctuation, 'pragma'),
            (r';', Punctuation, '#pop'),
            (r'then|else', Keyword, '#pop'),
            (r'(at)(\s+)(' + stringdouble + ')',
             bygroups(Keyword, Text, String.Double), 'namespacedecl'),
            (r'(at)(\s+)(' + stringsingle + ')',
             bygroups(Keyword, Text, String.Single), 'namespacedecl'),
            (r'except|intersect|in|is|return|satisfies|to|union|where',
             Keyword, 'root'),
            (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
            (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'),
            (r'external|at', Keyword, 'root'),
            (r'(stable)(\s+)(order)(\s+)(by)',
             bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
            (r'(castable|cast)(\s+)(as)',
             bygroups(Keyword, Text, Keyword), 'singletype'),
            (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
            (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
            (r'case|as', Keyword, 'itemtype'),
            (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
            (ncname + r':\*', Keyword.Type, 'operator'),
            (qname, Keyword.Type, 'occurrenceindicator'),
        ],
        'kindtest': [
            (r'\(:', Comment, 'comment'),
            (r'\{', Punctuation, 'root'),
            (r'(\))([*+?]?)', popstate_kindtest_callback),
            (r'\*', Name, 'closekindtest'),
            (qname, Name, 'closekindtest'),
            (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
        ],
        'kindtestforpi': [
            (r'\(:', Comment, 'comment'),
            (r'\)', Punctuation, '#pop'),
            (ncname, Name.Variable),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
        ],
        'closekindtest': [
            (r'\(:', Comment, 'comment'),
            (r'(\))', popstate_callback),
            (r',', Punctuation),
            (r'(\{)', pushstate_operator_root_callback),
            (r'\?', Punctuation),
        ],
        'xml_comment': [
            (r'(-->)', popstate_xmlcomment_callback),
            (r'[^-]{1,2}', Literal),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        'processing_instruction': [
            (r'\s+', Text, 'processing_instruction_content'),
            (r'\?>', String.Doc, '#pop'),
            (pitarget, Name),
        ],
        'processing_instruction_content': [
            (r'\?>', String.Doc, '#pop'),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        'cdata_section': [
            (r']]>', String.Doc, '#pop'),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        'start_tag': [
            include('whitespace'),
            (r'(/>)', popstate_tag_callback),
            (r'>', Name.Tag, 'element_content'),
            (r'"', Punctuation, 'quot_attribute_content'),
            (r"'", Punctuation, 'apos_attribute_content'),
            (r'=', Operator),
            (qname, Name.Tag),
        ],
        'quot_attribute_content': [
            (r'"', Punctuation, 'start_tag'),
            (r'(\{)', pushstate_root_callback),
            (r'""', Name.Attribute),
            (quotattrcontentchar, Name.Attribute),
            (entityref, Name.Attribute),
            (charref, Name.Attribute),
            (r'\{\{|\}\}', Name.Attribute),
        ],
        'apos_attribute_content': [
            (r"'", Punctuation, 'start_tag'),
            (r'\{', Punctuation, 'root'),
            (r"''", Name.Attribute),
            (aposattrcontentchar, Name.Attribute),
            (entityref, Name.Attribute),
            (charref, Name.Attribute),
            (r'\{\{|\}\}', Name.Attribute),
        ],
        'element_content': [
            (r'</', Name.Tag, 'end_tag'),
            (r'(\{)', pushstate_root_callback),
            (r'(<!--)', pushstate_element_content_xmlcomment_callback),
            (r'(<\?)', pushstate_element_content_processing_instruction_callback),
            (r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
            (r'(<)', pushstate_element_content_starttag_callback),
            (elementcontentchar, Literal),
            (entityref, Literal),
            (charref, Literal),
            (r'\{\{|\}\}', Literal),
        ],
        'end_tag': [
            include('whitespace'),
            (r'(>)', popstate_tag_callback),
            (qname, Name.Tag),
        ],
        'xmlspace_decl': [
            (r'\(:', Comment, 'comment'),
            (r'preserve|strip', Keyword, '#pop'),
        ],
        'declareordering': [
            (r'\(:', Comment, 'comment'),
            include('whitespace'),
            (r'ordered|unordered', Keyword, '#pop'),
        ],
        'xqueryversion': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r'encoding', Keyword),
            (r';', Punctuation, '#pop'),
        ],
        'pragma': [
            (qname, Name.Variable, 'pragmacontents'),
        ],
        'pragmacontents': [
            (r'#\)', Punctuation, 'operator'),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
            (r'(\s+)', Text),
        ],
        'occurrenceindicator': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'\*|\?|\+', Operator, 'operator'),
            (r':=', Operator, 'root'),
            default('operator'),
        ],
        'option': [
            include('whitespace'),
            (qname, Name.Variable, '#pop'),
        ],
        'qname_braren': [
            include('whitespace'),
            (r'(\{)', pushstate_operator_root_callback),
            (r'(\()', Punctuation, 'root'),
        ],
        'element_qname': [
            (qname, Name.Variable, 'root'),
        ],
        'attribute_qname': [
            (qname, Name.Variable, 'root'),
        ],
        'root': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            # handle operator state
            # order on numbers matters - handle most complex first
            (r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Float, 'operator'),
            (r'(\.\d+)[eE][\+\-]?\d+', Number.Float, 'operator'),
            (r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'),
            (r'(\d+)', Number.Integer, 'operator'),
            (r'(\.\.|\.|\))', Punctuation, 'operator'),
            (r'(declare)(\s+)(construction)',
             bygroups(Keyword, Text, Keyword), 'operator'),
            (r'(declare)(\s+)(default)(\s+)(order)',
             bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'),
            (ncname + ':\*', Name, 'operator'),
            ('\*:'+ncname, Name.Tag, 'operator'),
            ('\*', Name.Tag, 'operator'),
            (stringdouble, String.Double, 'operator'),
            (stringsingle, String.Single, 'operator'),
            (r'(\})', popstate_callback),
            # NAMESPACE DECL
            (r'(declare)(\s+)(default)(\s+)(collation)',
             bygroups(Keyword, Text, Keyword, Text, Keyword)),
            (r'(module|declare)(\s+)(namespace)',
             bygroups(Keyword, Text, Keyword), 'namespacedecl'),
            (r'(declare)(\s+)(base-uri)',
             bygroups(Keyword, Text, Keyword), 'namespacedecl'),
            # NAMESPACE KEYWORD
            (r'(declare)(\s+)(default)(\s+)(element|function)',
             bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'),
            (r'(import)(\s+)(schema|module)',
             bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
            (r'(declare)(\s+)(copy-namespaces)',
             bygroups(Keyword, Text, Keyword), 'namespacekeyword'),
            # VARNAMEs
            (r'(for|let|some|every)(\s+)(\$)',
             bygroups(Keyword, Text, Name.Variable), 'varname'),
            (r'\$', Name.Variable, 'varname'),
            (r'(declare)(\s+)(variable)(\s+)(\$)',
             bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
            # ITEMTYPE
            (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
            (r'(element|attribute|schema-element|schema-attribute|comment|'
             r'text|node|document-node|empty-sequence)(\s+)(\()',
             pushstate_operator_kindtest_callback),
            (r'(processing-instruction)(\s+)(\()',
             pushstate_operator_kindtestforpi_callback),
            (r'(<!--)', pushstate_operator_xmlcomment_callback),
            (r'(<\?)', pushstate_operator_processing_instruction_callback),
            (r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
            # (r'</', Name.Tag, 'end_tag'),
            (r'(<)', pushstate_operator_starttag_callback),
            (r'(declare)(\s+)(boundary-space)',
             bygroups(Keyword, Text, Keyword), 'xmlspace_decl'),
            (r'(validate)(\s+)(lax|strict)',
             pushstate_operator_root_validate_withmode),
            (r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
            (r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
            (r'(element|attribute)(\s*)(\{)',
             pushstate_operator_root_construct_callback),
            (r'(document|text|processing-instruction|comment)(\s*)(\{)',
             pushstate_operator_root_construct_callback),
            # ATTRIBUTE
            (r'(attribute)(\s+)(?=' + qname + r')',
             bygroups(Keyword, Text), 'attribute_qname'),
            # ELEMENT
            (r'(element)(\s+)(?=' + qname + r')',
             bygroups(Keyword, Text), 'element_qname'),
            # PROCESSING_INSTRUCTION
            (r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
             bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
             'operator'),
            (r'(declare|define)(\s+)(function)',
             bygroups(Keyword, Text, Keyword)),
            (r'(\{)', pushstate_operator_root_callback),
            (r'(unordered|ordered)(\s*)(\{)',
             pushstate_operator_order_callback),
            (r'(declare)(\s+)(ordering)',
             bygroups(Keyword, Text, Keyword), 'declareordering'),
            (r'(xquery)(\s+)(version)',
             bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
            (r'(\(#)', Punctuation, 'pragma'),
            # sometimes return can occur in root state
            (r'return', Keyword),
            (r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword),
             'option'),
            # URI LITERALS - single and double quoted
            (r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
            (r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
            (r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
             bygroups(Keyword, Punctuation)),
            (r'(descendant|following-sibling|following|parent|preceding-sibling'
             r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
            (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
            (r'then|else', Keyword),
            # ML specific
            (r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
            (r'(catch)(\s*)(\()(\$)',
             bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
            (r'(@'+qname+')', Name.Attribute),
            (r'(@'+ncname+')', Name.Attribute),
            (r'@\*:'+ncname, Name.Attribute),
            (r'(@)', Name.Attribute),
            (r'//|/|\+|-|;|,|\(|\)', Punctuation),
            # STANDALONE QNAMES
            (qname + r'(?=\s*{)', Name.Tag, 'qname_braren'),
            (qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
            (qname, Name.Tag, 'operator'),
        ]
    }
class QmlLexer(RegexLexer):
    """
    For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.
    .. versionadded:: 1.6
    """
    # QML is based on javascript, so much of this is taken from the
    # JavascriptLexer above.
    name = 'QML'
    aliases = ['qml']
    filenames = ['*.qml']
    mimetypes = ['application/x-qml']
    # pasted from JavascriptLexer, with some additions
    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered after tokens that may legally be followed by a regex
        # literal, so that "/" is tried as a regex before plain division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            # A "/" that did not parse as a regex: swallow the rest of the line.
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            # QML insertions: "id: name" bindings and "property:" labels.
            (r'\bid\s*:\s*[A-Za-z][_A-Za-z.0-9]*', Keyword.Declaration,
             'slashstartsregex'),
            (r'\b[A-Za-z][_A-Za-z.0-9]*\s*:', Keyword, 'slashstartsregex'),
            # the rest from JavascriptLexer
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class CirruLexer(RegexLexer):
    """
    Syntax rules of Cirru can be found at:
    http://grammar.cirru.org/
    * using ``()`` to markup blocks, but limited in the same line
    * using ``""`` to markup strings, allow ``\`` to escape
    * using ``$`` as a shorthand for ``()`` till indentation end or ``)``
    * using indentations for create nesting
    .. versionadded:: 2.0
    """
    name = 'Cirru'
    aliases = ['cirru']
    filenames = ['*.cirru', '*.cr']
    mimetypes = ['text/x-cirru']
    flags = re.MULTILINE
    tokens = {
        # Inside a double-quoted string literal.
        'string': [
            (r'[^"\\\n]', String),
            (r'\\', String.Escape, 'escape'),
            (r'"', String, '#pop'),
        ],
        # The single character following a backslash inside a string.
        'escape': [
            (r'.', String.Escape, '#pop'),
        ],
        # First token of a block is highlighted as the function name.
        'function': [
            (r'[\w-][^\s\(\)\"]*', Name.Function, '#pop'),
            (r'\)', Operator, '#pop'),
            (r'(?=\n)', Text, '#pop'),
            (r'\(', Operator, '#push'),
            (r'"', String, ('#pop', 'string')),
            (r'\s+', Text.Whitespace),
            (r'\,', Operator, '#pop'),
        ],
        # Remaining tokens on the current line.
        'line': [
            (r'^\B', Text.Whitespace, 'function'),
            (r'\$', Operator, 'function'),
            (r'\(', Operator, 'function'),
            (r'\)', Operator),
            (r'(?=\n)', Text, '#pop'),
            (r'\n', Text, '#pop'),
            (r'"', String, 'string'),
            (r'\s+', Text.Whitespace),
            (r'[\d\.]+', Number),
            (r'[\w-][^\"\(\)\s]*', Name.Variable),
            (r'--', Comment.Single)
        ],
        # Each physical line starts a 'line' state whose head is a 'function'.
        'root': [
            (r'^\s*', Text.Whitespace, ('line', 'function')),
            (r'^\s+$', Text.Whitespace),
        ]
    }
class SlimLexer(ExtendedRegexLexer):
    """
    For Slim markup.
    .. versionadded:: 2.0
    """
    name = 'Slim'
    aliases = ['slim']
    filenames = ['*.slim']
    mimetypes = ['text/x-slim']
    flags = re.IGNORECASE
    # Like ".", but also consumes a " |"-continued line break.
    _dot = r'(?: \|\n(?=.* \|)|.)'
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            # _indentation is a callback shared by indentation-based lexers.
            (r'[ \t]*', _indentation),
        ],
        # CSS-style shorthands: .class and #id imply a tag.
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],
        'eval-or-plain': [
            # "=" / "==" lines embed Ruby output expressions.
            (r'([ \t]*==?)(.*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            (r'[ \t]+[\w:-]+(?=[=])', Name.Attribute, 'html-attributes'),
            # Fallback: treat the rest as plain text.
            (r'', Text, 'plain'),
        ],
        'content': [
            include('css'),
            (r'[\w:-]+:[ \t]*\n', Text, 'plain'),
            # "-" lines embed Ruby control code.
            (r'(-)(.*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            (r'\|' + _dot + r'*\n', _starts_block(Text, 'plain'), '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment.Preproc, 'slim-comment-block'), '#pop'),
            (r'[\w:-]+', Name.Tag, 'tag'),
            include('eval-or-plain'),
        ],
        'tag': [
            include('css'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            (r'[ \t]+\n', Punctuation, '#pop:2'),
            include('eval-or-plain'),
        ],
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            # Ruby string interpolation: #{ ... }
            (r'(#\{)(.*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'=', Punctuation),
            # Attribute values are lexed as Ruby expressions.
            (r'"[^\"]+"', using(RubyLexer), 'tag'),
            (r'\'[^\']+\'', using(RubyLexer), 'tag'),
            (r'[\w]+', Text, 'tag'),
        ],
        'slim-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
    }
| lgpl-2.1 |
mrquim/mrquimrepo | repo/script.module.covenant/lib/resources/lib/sources/gr/xrysoi.py | 6 | 3448 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urlparse, re
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import trakt
from resources.lib.modules import tvmaze
class source:
    """Covenant source scraper for xrysoi.se (Greek-subtitled releases).

    NOTE: this is Python 2 code (urllib.urlencode / urlparse module).
    """

    def __init__(self):
        self.priority = 1
        self.language = ['gr']
        self.domains = ['xrysoi.se']
        self.base_link = 'http://xrysoi.se/'
        # RSS search feed; '%s' is filled with the quoted query string.
        self.search_link = 'search/%s/feed/rss2/'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Build the opaque url payload later consumed by sources().

        Returns a urlencoded dict of the movie identifiers, or None on
        failure (provider convention: never raise to the caller).
        """
        try:
            url = {'imdb': imdb, 'title': title, 'aliases': aliases, 'year': year}
            return urllib.urlencode(url)
        except Exception:
            return

    def sources(self, url, hostDict, hostprDict):
        """Search the site's RSS feed and return playable source dicts.

        Always returns a list (possibly empty) and never raises; posts
        whose title or year does not match the request are skipped.
        """
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) if data[i] else (i, '') for i in data)

            title = data['title']
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                try:
                    name = client.parseDOM(post, 'title')
                    links = client.parseDOM(post, 'a', ret='href')

                    # Strip everything from the year onwards to compare titles.
                    t = re.sub('(\.|\(|\[|\s|)(\d{4})(\.|\)|\]|\s|)(.+|)', '', name[0])
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    # The release year appears in parentheses in the feed title.
                    y = re.findall('\(\s*(\d{4})\s*\)', name[0])[0]
                    if not y == year:
                        raise Exception()

                    for url in links:
                        # Skip the site's own mirrors / blogspot junk links.
                        if any(x in url for x in ['.online', 'xrysoi.se', 'filmer', '.bp', '.blogger']):
                            continue
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if 'hdvid' in host:
                            valid = True
                        if not valid:
                            continue

                        quality = 'SD'
                        info = 'SUB'
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'gr', 'url': url, 'info': info,
                                        'direct': False, 'debridonly': False})
                except Exception:
                    # Best effort: one malformed post must not kill the search.
                    pass

            return sources
        except Exception:
            return sources

    def resolve(self, url):
        """Hoster links need no further resolution for this provider."""
        return url
Orochimarufan/youtube-dl | youtube_dl/extractor/reddit.py | 10 | 4208 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
url_or_none,
)
class RedditIE(InfoExtractor):
    """Extractor for bare v.redd.it clip links."""
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        clip_id = self._match_id(url)

        # Reddit serves both an HLS and a DASH manifest for each clip;
        # gather formats from both and let the sorter rank them.
        hls_formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % clip_id, clip_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
        dash_formats = self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % clip_id, clip_id,
            mpd_id='dash', fatal=False)

        formats = hls_formats + dash_formats
        self._sort_formats(formats)

        # No metadata endpoint for bare links: the id doubles as the title.
        return {
            'id': clip_id,
            'title': clip_id,
            'formats': formats,
        }
class RedditRIE(InfoExtractor):
    """Extractor for reddit.com comment-thread URLs; delegates the actual
    media extraction to whatever site the post links to (url_transparent)."""
    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Re-extract against the 'url' group so trailing slug/query parts are
        # dropped before the .json request below.
        url, video_id = mobj.group('url', 'id')
        video_id = self._match_id(url)
        # Appending /.json to a thread URL returns the post metadata;
        # the first child of the first listing is the submission itself.
        data = self._download_json(
            url + '/.json', video_id)[0]['data']['children'][0]['data']
        video_url = data['url']
        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)
        # Map reddit's over_18 flag (True/False/absent) onto an age limit.
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None
        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnail': url_or_none(data.get('thumbnail')),
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }
| unlicense |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/IPython/utils/tests/test_module_paths.py | 14 | 3758 | # encoding: utf-8
"""Tests for IPython.utils.module_paths.py"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import with_statement
import os
import shutil
import sys
import tempfile
from os.path import join, abspath, split
from IPython.testing.tools import make_tempfile
import IPython.utils.module_paths as mp
env = os.environ
# Directory containing this test file.
TEST_FILE_PATH = split(abspath(__file__))[0]
# Scratch directory where setup() builds fake packages/modules.
TMP_TEST_DIR = tempfile.mkdtemp()
#
# Setup/teardown functions/decorators
#
# Original sys.path, restored by teardown().
old_syspath = sys.path
def make_empty_file(fname):
    """Create *fname* as an empty file, truncating any existing content."""
    # Context manager guarantees the handle is closed even if an
    # interleaving operation raises (original leaked on such a failure).
    with open(fname, 'w'):
        pass
def setup():
    """Setup testenvironment for the module:
    """
    # Do not mask exceptions here. In particular, catching WindowsError is a
    # problem because that exception is only defined on Windows...
    # Build a fake package ("xmod" with a submodule), an empty directory
    # ("nomod"), a loose module and a stray .pyc so the find_mod/find_module
    # tests below have fixtures to hit.
    os.makedirs(join(TMP_TEST_DIR, "xmod"))
    os.makedirs(join(TMP_TEST_DIR, "nomod"))
    make_empty_file(join(TMP_TEST_DIR, "xmod/__init__.py"))
    make_empty_file(join(TMP_TEST_DIR, "xmod/sub.py"))
    make_empty_file(join(TMP_TEST_DIR, "pack.py"))
    make_empty_file(join(TMP_TEST_DIR, "packpyc.pyc"))
    # Point imports exclusively at the scratch dir; teardown() restores it.
    sys.path = [TMP_TEST_DIR]
def teardown():
    """Teardown testenvironment for the module:
    - Remove tempdir
    - restore sys.path
    """
    # Note: we remove the parent test dir, which is the root of all test
    # subdirs we may have created. Use shutil instead of os.removedirs, so
    # that non-empty directories are all recursively removed.
    shutil.rmtree(TMP_TEST_DIR)
    sys.path = old_syspath
# get_init() should locate __init__.py / __init__.pyw but not a bare .pyc.
def test_get_init_1():
    """See if get_init can find __init__.py in this testdir"""
    with make_tempfile(join(TMP_TEST_DIR, "__init__.py")):
        assert mp.get_init(TMP_TEST_DIR)
def test_get_init_2():
    """See if get_init can find __init__.pyw in this testdir"""
    with make_tempfile(join(TMP_TEST_DIR, "__init__.pyw")):
        assert mp.get_init(TMP_TEST_DIR)
def test_get_init_3():
    """get_init can't find __init__.pyc in this testdir"""
    with make_tempfile(join(TMP_TEST_DIR, "__init__.pyc")):
        assert mp.get_init(TMP_TEST_DIR) is None
def test_get_init_4():
    """get_init can't find __init__ in empty testdir"""
    assert mp.get_init(TMP_TEST_DIR) is None
def test_find_mod_1():
    """find_mod resolves a package name to its __init__.py."""
    modpath = join(TMP_TEST_DIR, "xmod", "__init__.py")
    assert mp.find_mod("xmod") == modpath
def test_find_mod_2():
    # NOTE(review): exact duplicate of test_find_mod_1 — probably meant to
    # exercise a different lookup; confirm against upstream IPython.
    modpath = join(TMP_TEST_DIR, "xmod", "__init__.py")
    assert mp.find_mod("xmod") == modpath
def test_find_mod_3():
    """find_mod resolves a dotted submodule name to its source file."""
    modpath = join(TMP_TEST_DIR, "xmod", "sub.py")
    assert mp.find_mod("xmod.sub") == modpath
def test_find_mod_4():
    """find_mod resolves a top-level module to its .py file."""
    modpath = join(TMP_TEST_DIR, "pack.py")
    assert mp.find_mod("pack") == modpath
def test_find_mod_5():
    """A name backed only by a .pyc is not found."""
    assert mp.find_mod("packpyc") is None
def test_find_module_1():
    """find_module resolves a package name to its directory."""
    modpath = join(TMP_TEST_DIR, "xmod")
    assert mp.find_module("xmod") == modpath
def test_find_module_2():
    """Testing sys.path that is empty"""
    assert mp.find_module("xmod", []) is None
def test_find_module_3():
    """A None module name (with a None path) yields None."""
    assert mp.find_module(None, None) is None
def test_find_module_4():
    """A None module name yields None."""
    assert mp.find_module(None) is None
def test_find_module_5():
    """A dotted name whose package lacks the submodule is not found."""
    assert mp.find_module("xmod.nopack") is None
| mit |
huaweiswitch/neutron | neutron/agent/ovs_cleanup_util.py | 8 | 3836 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.common import config
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def setup_conf():
    """Setup the cfg for the clean up utility.
    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during clean-up.

    Returns the global cfg.CONF object with the cleanup-relevant option
    groups registered.
    """
    opts = [
        cfg.BoolOpt('ovs_all_ports',
                    default=False,
                    help=_('True to delete all ports on all the OpenvSwitch '
                           'bridges. False to delete ports created by '
                           'Neutron on integration and external network '
                           'bridges.'))
    ]
    conf = cfg.CONF
    conf.register_cli_opts(opts)
    # Reuse the L3 agent / interface option definitions so bridge names and
    # the root helper are parsed exactly as the agents themselves parse them.
    conf.register_opts(l3_agent.L3NATAgent.OPTS)
    conf.register_opts(interface.OPTS)
    agent_config.register_interface_driver_opts_helper(conf)
    agent_config.register_use_namespaces_opts_helper(conf)
    agent_config.register_root_helper(conf)
    return conf
def collect_neutron_ports(bridges, root_helper):
    """Collect ports created by Neutron from OVS.

    Returns the flat list of VIF port names found on every bridge.
    """
    return [vif.port_name
            for bridge in bridges
            for vif in ovs_lib.OVSBridge(bridge, root_helper).get_vif_ports()]
def delete_neutron_ports(ports, root_helper):
    """Delete non-internal ports created by Neutron
    Non-internal OVS ports need to be removed manually.

    Silently skips port names whose network device no longer exists.
    """
    for port in ports:
        if ip_lib.device_exists(port):
            device = ip_lib.IPDevice(port, root_helper)
            device.link.delete()
            LOG.info(_("Delete %s"), port)
def main():
    """Main method for cleaning up OVS bridges.
    The utility cleans up the integration bridges used by Neutron.
    """
    conf = setup_conf()
    conf()
    config.setup_logging()
    # Bridges named in the configuration (integration + external).
    configuration_bridges = set([conf.ovs_integration_bridge,
                                 conf.external_network_bridge])
    # Bridges that actually exist on this host right now.
    ovs_bridges = set(ovs_lib.get_bridges(conf.AGENT.root_helper))
    available_configuration_bridges = configuration_bridges & ovs_bridges
    if conf.ovs_all_ports:
        bridges = ovs_bridges
    else:
        bridges = available_configuration_bridges
    # Collect existing ports created by Neutron on configuration bridges.
    # After deleting ports from OVS bridges, we cannot determine which
    # ports were created by Neutron, so port information is collected now.
    ports = collect_neutron_ports(available_configuration_bridges,
                                  conf.AGENT.root_helper)
    for bridge in bridges:
        LOG.info(_("Cleaning %s"), bridge)
        ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper)
        ovs.delete_ports(all_ports=conf.ovs_all_ports)
    # Remove remaining ports created by Neutron (usually veth pair)
    delete_neutron_ports(ports, conf.AGENT.root_helper)
    LOG.info(_("OVS cleanup completed successfully"))
garbled1/ansible | lib/ansible/modules/network/cloudengine/ce_bgp.py | 27 | 81614 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_bgp
version_added: "2.4"
short_description: Manages BGP configuration on HUAWEI CloudEngine switches.
description:
- Manages BGP configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
as_number:
description:
- Local AS number.
The value is a string of 1 to 11 characters.
required: false
default: null
graceful_restart:
description:
- Enable GR of the BGP speaker in the specified address family, peer address, or peer group.
required: false
default: no_use
choices: ['no_use','true','false']
time_wait_for_rib:
description:
- Period of waiting for the End-Of-RIB flag.
The value is an integer ranging from 3 to 3000. The default value is 600.
required: false
default: null
as_path_limit:
description:
- Maximum number of AS numbers in the AS_Path attribute. The default value is 255.
required: false
default: null
check_first_as:
description:
- Check the first AS in the AS_Path of the update messages from EBGP peers.
required: false
default: no_use
choices: ['no_use','true','false']
confed_id_number:
description:
- Confederation ID.
The value is a string of 1 to 11 characters.
required: false
default: null
confed_nonstanded:
description:
- Configure the device to be compatible with devices in a nonstandard confederation.
required: false
default: no_use
choices: ['no_use','true','false']
bgp_rid_auto_sel:
description:
- The function to automatically select router IDs for all VPN BGP instances is enabled.
required: false
default: no_use
choices: ['no_use','true','false']
keep_all_routes:
description:
- If the value is true, the system stores all route update messages received from all peers (groups) after
BGP connection setup.
If the value is false, the system stores only BGP update messages that are received from peers and pass
the configured import policy.
required: false
default: no_use
choices: ['no_use','true','false']
memory_limit:
description:
- Support BGP RIB memory protection.
required: false
default: no_use
choices: ['no_use','true','false']
gr_peer_reset:
description:
- Peer disconnection through GR.
required: false
default: no_use
choices: ['no_use','true','false']
is_shutdown:
description:
- Interrupt BGP all neighbor.
required: false
default: no_use
choices: ['no_use','true','false']
suppress_interval:
description:
- Suppress interval.
required: false
default: null
hold_interval:
description:
- Hold interval.
required: false
default: null
clear_interval:
description:
- Clear interval.
required: false
default: null
confed_peer_as_num:
description:
- Confederation AS number, in two-byte or four-byte format.
The value is a string of 1 to 11 characters.
required: false
default: null
vrf_name:
description:
- Name of a BGP instance. The name is a case-sensitive string of characters.
required: false
default: null
vrf_rid_auto_sel:
description:
- If the value is true, VPN BGP instances are enabled to automatically select router IDs.
If the value is false, VPN BGP instances are disabled from automatically selecting router IDs.
required: false
default: no_use
choices: ['no_use','true','false']
router_id:
description:
- ID of a router that is in IPv4 address format.
required: false
default: null
keepalive_time:
description:
- If the value of a timer changes, the BGP peer relationship between the routers is disconnected.
The value is an integer ranging from 0 to 21845. The default value is 60.
required: false
default: null
hold_time:
description:
- Hold time, in seconds. The value of the hold time can be 0 or range from 3 to 65535.
required: false
default: null
min_hold_time:
description:
- Min hold time, in seconds. The value of the hold time can be 0 or range from 20 to 65535.
required: false
default: null
conn_retry_time:
description:
- ConnectRetry interval. The value is an integer, in seconds. The default value is 32s.
required: false
default: null
ebgp_if_sensitive:
description:
- If the value is true, After the fast EBGP interface awareness function is enabled, EBGP sessions on
an interface are deleted immediately when the interface goes Down.
If the value is false, After the fast EBGP interface awareness function is enabled, EBGP sessions
on an interface are not deleted immediately when the interface goes Down.
required: false
default: no_use
choices: ['no_use','true','false']
default_af_type:
description:
- Type of a created address family, which can be IPv4 unicast or IPv6 unicast.
The default type is IPv4 unicast.
required: false
default: null
choices: ['ipv4uni','ipv6uni']
'''
EXAMPLES = '''
- name: CloudEngine BGP test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Enable BGP"
ce_bgp:
state: present
as_number: 100
confed_id_number: 250
provider: "{{ cli }}"
- name: "Disable BGP"
ce_bgp:
state: absent
as_number: 100
confed_id_number: 250
provider: "{{ cli }}"
- name: "Create confederation peer AS num"
ce_bgp:
state: present
confed_peer_as_num: 260
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"as_number": "100", state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"bgp_enable": [["100"], ["true"]]}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"bgp_enable": [["100"], ["true"]]}
updates:
description: command sent to the device
returned: always
type: list
sample: ["bgp 100"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec
SUCCESS = """success"""
FAILED = """failed"""
# get bgp enable
CE_GET_BGP_ENABLE = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpSite>
<bgpEnable></bgpEnable>
<asNumber></asNumber>
</bgpSite>
</bgpcomm>
</bgp>
</filter>
"""
CE_GET_BGP_ENABLE_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpSite>
"""
CE_GET_BGP_ENABLE_TAIL = """
</bgpSite>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp enable
CE_MERGE_BGP_ENABLE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpSite operation="merge">
"""
CE_MERGE_BGP_ENABLE_TAIL = """
</bgpSite>
</bgpcomm>
</bgp>
</config>
"""
# get bgp confederation peer as
CE_GET_BGP_CONFED_PEER_AS = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs>
<confedPeerAsNum></confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp confederation peer as
CE_MERGE_BGP_CONFED_PEER_AS = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs operation="merge">
<confedPeerAsNum>%s</confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</config>
"""
# create bgp confederation peer as
CE_CREATE_BGP_CONFED_PEER_AS = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs operation="create">
<confedPeerAsNum>%s</confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp confederation peer as
CE_DELETE_BGP_CONFED_PEER_AS = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpConfedPeerAss>
<bgpConfedPeerAs operation="delete">
<confedPeerAsNum>%s</confedPeerAsNum>
</bgpConfedPeerAs>
</bgpConfedPeerAss>
</bgpcomm>
</bgp>
</config>
"""
# get bgp instance
CE_GET_BGP_INSTANCE = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName></vrfName>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# get bgp instance
CE_GET_BGP_INSTANCE_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
"""
CE_GET_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp instance
CE_MERGE_BGP_INSTANCE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf operation="merge">
"""
CE_MERGE_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# create bgp instance
CE_CREATE_BGP_INSTANCE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf operation="create">
"""
CE_CREATE_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp instance
CE_DELETE_BGP_INSTANCE_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf operation="delete">
"""
CE_DELETE_BGP_INSTANCE_TAIL = """
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
def check_ip_addr(**kwargs):
    """Validate a dotted-quad IPv4 address string.

    Expects kwargs["ipaddr"]. Returns SUCCESS when the address consists of
    exactly four integer octets in [0, 255], otherwise FAILED.
    """
    ipaddr = kwargs["ipaddr"]

    octets = ipaddr.strip().split('.')
    if len(octets) != 4:
        return FAILED

    for octet in octets:
        try:
            value = int(octet)
        except ValueError:
            # Original code crashed here on non-numeric octets
            # (e.g. "1.a.2.3"); report them as invalid instead.
            return FAILED
        if not 0 <= value <= 255:
            return FAILED

    return SUCCESS
def check_bgp_enable_args(**kwargs):
    """Return True when the local AS number must be (re)configured.

    A supplied as_number must be a string of 1 to 11 characters; any other
    length aborts the module via fail_json.
    """
    module = kwargs["module"]

    as_number = module.params['as_number']
    if not as_number:
        return False

    if 1 <= len(as_number) <= 11:
        return True

    module.fail_json(
        msg='Error: The len of as_number %s is out of [1 - 11].' % as_number)
    return False
def check_bgp_confed_args(**kwargs):
    """Return True when a confederation peer AS number must be configured.

    A supplied confed_peer_as_num must be 1 to 11 characters long; any
    other length aborts the module via fail_json.
    """
    module = kwargs["module"]

    peer_as = module.params['confed_peer_as_num']
    if not peer_as:
        return False

    if 1 <= len(peer_as) <= 11:
        return True

    module.fail_json(
        msg='Error: The len of confed_peer_as_num %s is out of [1 - 11].' % peer_as)
    return False
class Bgp(object):
    """ Manages BGP configuration """

    def netconf_get_config(self, **kwargs):
        """ netconf_get_config """
        # Thin wrapper: fetch the device config matching the XML filter in
        # kwargs["conf_str"] over NETCONF and return the raw XML string.
        module = kwargs["module"]
        conf_str = kwargs["conf_str"]

        xml_str = get_nc_config(module, conf_str)

        return xml_str

    def netconf_set_config(self, **kwargs):
        """ netconf_set_config """
        # Thin wrapper: push the XML config in kwargs["conf_str"] to the
        # device over NETCONF and return the device's XML reply.
        module = kwargs["module"]
        conf_str = kwargs["conf_str"]

        xml_str = set_nc_config(module, conf_str)

        return xml_str
def check_bgp_enable_other_args(self, **kwargs):
    """Diff the requested global BGP options against the device state.

    For every global BGP option present in module.params, queries the
    current value over NETCONF, records what the device reported in the
    returned dict, and sets result["need_cfg"] True when at least one
    option must be (re)configured for the requested state.  String flag
    options use the sentinel 'no_use' to mean "not requested".  Numeric
    options are range-validated and fail the module when out of range.
    """
    module = kwargs["module"]
    state = module.params['state']
    result = dict()
    need_cfg = False
    # graceful_restart: flag option; compare requested vs. device value.
    graceful_restart = module.params['graceful_restart']
    if graceful_restart != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<gracefulRestart></gracefulRestart>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            # Empty reply: nothing configured yet, so change is needed.
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<gracefulRestart>(.*)</gracefulRestart>.*', recv_xml)
            if re_find:
                result["graceful_restart"] = re_find
                if re_find[0] != graceful_restart:
                    need_cfg = True
            else:
                need_cfg = True
    # time_wait_for_rib: numeric option, valid range [3, 3000].
    # For state=present, change when device differs; for absent, change
    # (i.e. delete) only when the device matches the requested value.
    time_wait_for_rib = module.params['time_wait_for_rib']
    if time_wait_for_rib:
        if int(time_wait_for_rib) > 3000 or int(time_wait_for_rib) < 3:
            module.fail_json(
                msg='Error: The time_wait_for_rib %s is out of [3 - 3000].' % time_wait_for_rib)
        else:
            conf_str = CE_GET_BGP_ENABLE_HEADER + \
                "<timeWaitForRib></timeWaitForRib>" + CE_GET_BGP_ENABLE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<timeWaitForRib>(.*)</timeWaitForRib>.*', recv_xml)
                    if re_find:
                        result["time_wait_for_rib"] = re_find
                        if re_find[0] != time_wait_for_rib:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<timeWaitForRib>(.*)</timeWaitForRib>.*', recv_xml)
                    if re_find:
                        result["time_wait_for_rib"] = re_find
                        if re_find[0] == time_wait_for_rib:
                            need_cfg = True
    # as_path_limit: numeric option, valid range [1, 2000].
    as_path_limit = module.params['as_path_limit']
    if as_path_limit:
        if int(as_path_limit) > 2000 or int(as_path_limit) < 1:
            module.fail_json(
                msg='Error: The as_path_limit %s is out of [1 - 2000].' % as_path_limit)
        else:
            conf_str = CE_GET_BGP_ENABLE_HEADER + \
                "<asPathLimit></asPathLimit>" + CE_GET_BGP_ENABLE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<asPathLimit>(.*)</asPathLimit>.*', recv_xml)
                    if re_find:
                        result["as_path_limit"] = re_find
                        if re_find[0] != as_path_limit:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<asPathLimit>(.*)</asPathLimit>.*', recv_xml)
                    if re_find:
                        result["as_path_limit"] = re_find
                        if re_find[0] == as_path_limit:
                            need_cfg = True
    # check_first_as: flag option.
    check_first_as = module.params['check_first_as']
    if check_first_as != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<checkFirstAs></checkFirstAs>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<checkFirstAs>(.*)</checkFirstAs>.*', recv_xml)
            if re_find:
                result["check_first_as"] = re_find
                if re_find[0] != check_first_as:
                    need_cfg = True
            else:
                need_cfg = True
    # confed_id_number: string AS number, length must be in [1, 11].
    confed_id_number = module.params['confed_id_number']
    if confed_id_number:
        if len(confed_id_number) > 11 or len(confed_id_number) == 0:
            module.fail_json(
                msg='Error: The len of confed_id_number %s is out of [1 - 11].' % confed_id_number)
        else:
            conf_str = CE_GET_BGP_ENABLE_HEADER + \
                "<confedIdNumber></confedIdNumber>" + CE_GET_BGP_ENABLE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<confedIdNumber>(.*)</confedIdNumber>.*', recv_xml)
                    if re_find:
                        result["confed_id_number"] = re_find
                        if re_find[0] != confed_id_number:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<confedIdNumber>(.*)</confedIdNumber>.*', recv_xml)
                    if re_find:
                        result["confed_id_number"] = re_find
                        if re_find[0] == confed_id_number:
                            need_cfg = True
    # confed_nonstanded: flag option.
    confed_nonstanded = module.params['confed_nonstanded']
    if confed_nonstanded != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<confedNonstanded></confedNonstanded>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<confedNonstanded>(.*)</confedNonstanded>.*', recv_xml)
            if re_find:
                result["confed_nonstanded"] = re_find
                if re_find[0] != confed_nonstanded:
                    need_cfg = True
            else:
                need_cfg = True
    # bgp_rid_auto_sel: flag option.
    bgp_rid_auto_sel = module.params['bgp_rid_auto_sel']
    if bgp_rid_auto_sel != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<bgpRidAutoSel></bgpRidAutoSel>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<bgpRidAutoSel>(.*)</bgpRidAutoSel>.*', recv_xml)
            if re_find:
                result["bgp_rid_auto_sel"] = re_find
                if re_find[0] != bgp_rid_auto_sel:
                    need_cfg = True
            else:
                need_cfg = True
    # keep_all_routes: flag option.
    keep_all_routes = module.params['keep_all_routes']
    if keep_all_routes != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<keepAllRoutes></keepAllRoutes>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<keepAllRoutes>(.*)</keepAllRoutes>.*', recv_xml)
            if re_find:
                result["keep_all_routes"] = re_find
                if re_find[0] != keep_all_routes:
                    need_cfg = True
            else:
                need_cfg = True
    # memory_limit: flag option.
    memory_limit = module.params['memory_limit']
    if memory_limit != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<memoryLimit></memoryLimit>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<memoryLimit>(.*)</memoryLimit>.*', recv_xml)
            if re_find:
                result["memory_limit"] = re_find
                if re_find[0] != memory_limit:
                    need_cfg = True
            else:
                need_cfg = True
    # gr_peer_reset: flag option.
    gr_peer_reset = module.params['gr_peer_reset']
    if gr_peer_reset != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<grPeerReset></grPeerReset>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<grPeerReset>(.*)</grPeerReset>.*', recv_xml)
            if re_find:
                result["gr_peer_reset"] = re_find
                if re_find[0] != gr_peer_reset:
                    need_cfg = True
            else:
                need_cfg = True
    # is_shutdown: flag option.
    is_shutdown = module.params['is_shutdown']
    if is_shutdown != 'no_use':
        conf_str = CE_GET_BGP_ENABLE_HEADER + \
            "<isShutdown></isShutdown>" + CE_GET_BGP_ENABLE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if "<data/>" in recv_xml:
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<isShutdown>(.*)</isShutdown>.*', recv_xml)
            if re_find:
                result["is_shutdown"] = re_find
                if re_find[0] != is_shutdown:
                    need_cfg = True
            else:
                need_cfg = True
    # suppress/hold/clear intervals must be supplied together; each is
    # range-checked [1, 65535] and diffed like the other numeric options.
    suppress_interval = module.params['suppress_interval']
    hold_interval = module.params['hold_interval']
    clear_interval = module.params['clear_interval']
    if suppress_interval:
        if not hold_interval or not clear_interval:
            module.fail_json(
                msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.')
        if int(suppress_interval) > 65535 or int(suppress_interval) < 1:
            module.fail_json(
                msg='Error: The suppress_interval %s is out of [1 - 65535].' % suppress_interval)
        else:
            conf_str = CE_GET_BGP_ENABLE_HEADER + \
                "<suppressInterval></suppressInterval>" + CE_GET_BGP_ENABLE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<suppressInterval>(.*)</suppressInterval>.*', recv_xml)
                    if re_find:
                        result["suppress_interval"] = re_find
                        if re_find[0] != suppress_interval:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<suppressInterval>(.*)</suppressInterval>.*', recv_xml)
                    if re_find:
                        result["suppress_interval"] = re_find
                        if re_find[0] == suppress_interval:
                            need_cfg = True
    if hold_interval:
        if not suppress_interval or not clear_interval:
            module.fail_json(
                msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.')
        if int(hold_interval) > 65535 or int(hold_interval) < 1:
            module.fail_json(
                msg='Error: The hold_interval %s is out of [1 - 65535].' % hold_interval)
        else:
            conf_str = CE_GET_BGP_ENABLE_HEADER + \
                "<holdInterval></holdInterval>" + CE_GET_BGP_ENABLE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<holdInterval>(.*)</holdInterval>.*', recv_xml)
                    if re_find:
                        result["hold_interval"] = re_find
                        if re_find[0] != hold_interval:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<holdInterval>(.*)</holdInterval>.*', recv_xml)
                    if re_find:
                        result["hold_interval"] = re_find
                        if re_find[0] == hold_interval:
                            need_cfg = True
    if clear_interval:
        if not suppress_interval or not hold_interval:
            module.fail_json(
                msg='Error: Please input suppress_interval hold_interval clear_interval at the same time.')
        if int(clear_interval) > 65535 or int(clear_interval) < 1:
            module.fail_json(
                msg='Error: The clear_interval %s is out of [1 - 65535].' % clear_interval)
        else:
            conf_str = CE_GET_BGP_ENABLE_HEADER + \
                "<clearInterval></clearInterval>" + CE_GET_BGP_ENABLE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<clearInterval>(.*)</clearInterval>.*', recv_xml)
                    if re_find:
                        result["clear_interval"] = re_find
                        if re_find[0] != clear_interval:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<clearInterval>(.*)</clearInterval>.*', recv_xml)
                    if re_find:
                        result["clear_interval"] = re_find
                        if re_find[0] == clear_interval:
                            need_cfg = True
    result["need_cfg"] = need_cfg
    return result
def check_bgp_instance_args(self, **kwargs):
    """Return True when the BGP VRF instance list needs to change.

    For state=present the instance must be created when the device does
    not already report vrf_name; for state=absent it must be deleted
    when the device does report it.  Fails the module when vrf_name is
    longer than 31 characters.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    if not vrf_name:
        return False
    if not 0 < len(vrf_name) <= 31:
        module.fail_json(
            msg='the len of vrf_name %s is out of [1 - 31].' % vrf_name)
    conf_str = CE_GET_BGP_INSTANCE_HEADER + \
        "<vrfName></vrfName>" + CE_GET_BGP_INSTANCE_TAIL
    recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
    present = module.params['state'] == "present"
    if "<data/>" in recv_xml:
        # Device reports no instance data: create when present, nothing
        # to remove when absent.
        return present
    names = re.findall(r'.*<vrfName>(.*)</vrfName>.*', recv_xml)
    if present:
        return vrf_name not in names if names else True
    return bool(names) and vrf_name in names
def check_bgp_instance_other_args(self, **kwargs):
    """Diff the requested per-VRF BGP options against the device state.

    Every option here requires vrf_name to be set and fails the module
    otherwise.  Device values are queried over NETCONF and recorded in
    the returned dict; result["need_cfg"] is True when at least one
    option must be (re)configured for the requested state.  Numeric
    options are range-validated before the query.
    """
    module = kwargs["module"]
    state = module.params['state']
    result = dict()
    need_cfg = False
    vrf_name = module.params['vrf_name']
    # router_id: validated as a dotted-quad IPv4 address first.
    router_id = module.params['router_id']
    if router_id:
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        if check_ip_addr(ipaddr=router_id) == FAILED:
            module.fail_json(
                msg='Error: The router_id %s is invalid.' % router_id)
        conf_str = CE_GET_BGP_INSTANCE_HEADER + \
            "<routerId></routerId>" + CE_GET_BGP_INSTANCE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if state == "present":
            if "<data/>" in recv_xml:
                need_cfg = True
            else:
                re_find = re.findall(
                    r'.*<routerId>(.*)</routerId>.*', recv_xml)
                if re_find:
                    result["router_id"] = re_find
                    if re_find[0] != router_id:
                        need_cfg = True
                else:
                    need_cfg = True
        else:
            if "<data/>" in recv_xml:
                pass
            else:
                re_find = re.findall(
                    r'.*<routerId>(.*)</routerId>.*', recv_xml)
                if re_find:
                    result["router_id"] = re_find
                    if re_find[0] == router_id:
                        need_cfg = True
    # vrf_rid_auto_sel: NOTE(review) only the state=present branch is
    # handled here — state=absent never marks this option for change.
    vrf_rid_auto_sel = module.params['vrf_rid_auto_sel']
    if vrf_rid_auto_sel != 'no_use':
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        conf_str = CE_GET_BGP_INSTANCE_HEADER + \
            "<vrfRidAutoSel></vrfRidAutoSel>" + CE_GET_BGP_INSTANCE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if state == "present":
            if "<data/>" in recv_xml:
                need_cfg = True
            else:
                re_find = re.findall(
                    r'.*<vrfRidAutoSel>(.*)</vrfRidAutoSel>.*', recv_xml)
                if re_find:
                    result["vrf_rid_auto_sel"] = re_find
                    if re_find[0] != vrf_rid_auto_sel:
                        need_cfg = True
                else:
                    need_cfg = True
    # keepalive_time: numeric, valid range [0, 21845].
    keepalive_time = module.params['keepalive_time']
    if keepalive_time:
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        if int(keepalive_time) > 21845 or int(keepalive_time) < 0:
            module.fail_json(
                msg='keepalive_time %s is out of [0 - 21845].' % keepalive_time)
        else:
            conf_str = CE_GET_BGP_INSTANCE_HEADER + \
                "<keepaliveTime></keepaliveTime>" + CE_GET_BGP_INSTANCE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<keepaliveTime>(.*)</keepaliveTime>.*', recv_xml)
                    if re_find:
                        result["keepalive_time"] = re_find
                        if re_find[0] != keepalive_time:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<keepaliveTime>(.*)</keepaliveTime>.*', recv_xml)
                    if re_find:
                        result["keepalive_time"] = re_find
                        if re_find[0] == keepalive_time:
                            need_cfg = True
    # hold_time: numeric, valid range [3, 65535].
    hold_time = module.params['hold_time']
    if hold_time:
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        if int(hold_time) > 65535 or int(hold_time) < 3:
            module.fail_json(
                msg='hold_time %s is out of [3 - 65535].' % hold_time)
        else:
            conf_str = CE_GET_BGP_INSTANCE_HEADER + \
                "<holdTime></holdTime>" + CE_GET_BGP_INSTANCE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<holdTime>(.*)</holdTime>.*', recv_xml)
                    if re_find:
                        result["hold_time"] = re_find
                        if re_find[0] != hold_time:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<holdTime>(.*)</holdTime>.*', recv_xml)
                    if re_find:
                        result["hold_time"] = re_find
                        if re_find[0] == hold_time:
                            need_cfg = True
    # min_hold_time: numeric, 0 or [20, 65535].
    min_hold_time = module.params['min_hold_time']
    if min_hold_time:
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        if int(min_hold_time) != 0 and (int(min_hold_time) > 65535 or int(min_hold_time) < 20):
            module.fail_json(
                msg='min_hold_time %s is out of [0, or 20 - 65535].' % min_hold_time)
        else:
            conf_str = CE_GET_BGP_INSTANCE_HEADER + \
                "<minHoldTime></minHoldTime>" + CE_GET_BGP_INSTANCE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<minHoldTime>(.*)</minHoldTime>.*', recv_xml)
                    if re_find:
                        result["min_hold_time"] = re_find
                        if re_find[0] != min_hold_time:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<minHoldTime>(.*)</minHoldTime>.*', recv_xml)
                    if re_find:
                        result["min_hold_time"] = re_find
                        if re_find[0] == min_hold_time:
                            need_cfg = True
    # conn_retry_time: numeric, valid range [1, 65535].
    conn_retry_time = module.params['conn_retry_time']
    if conn_retry_time:
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        if int(conn_retry_time) > 65535 or int(conn_retry_time) < 1:
            module.fail_json(
                msg='conn_retry_time %s is out of [1 - 65535].' % conn_retry_time)
        else:
            conf_str = CE_GET_BGP_INSTANCE_HEADER + \
                "<connRetryTime></connRetryTime>" + CE_GET_BGP_INSTANCE_TAIL
            recv_xml = self.netconf_get_config(
                module=module, conf_str=conf_str)
            if state == "present":
                if "<data/>" in recv_xml:
                    need_cfg = True
                else:
                    re_find = re.findall(
                        r'.*<connRetryTime>(.*)</connRetryTime>.*', recv_xml)
                    if re_find:
                        result["conn_retry_time"] = re_find
                        if re_find[0] != conn_retry_time:
                            need_cfg = True
                    else:
                        need_cfg = True
            else:
                if "<data/>" in recv_xml:
                    pass
                else:
                    re_find = re.findall(
                        r'.*<connRetryTime>(.*)</connRetryTime>.*', recv_xml)
                    if re_find:
                        result["conn_retry_time"] = re_find
                        if re_find[0] == conn_retry_time:
                            need_cfg = True
    else:
        pass
    # ebgp_if_sensitive: flag option.
    ebgp_if_sensitive = module.params['ebgp_if_sensitive']
    if ebgp_if_sensitive != 'no_use':
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        conf_str = CE_GET_BGP_INSTANCE_HEADER + \
            "<ebgpIfSensitive></ebgpIfSensitive>" + CE_GET_BGP_INSTANCE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if state == "present":
            if "<data/>" in recv_xml:
                need_cfg = True
            else:
                re_find = re.findall(
                    r'.*<ebgpIfSensitive>(.*)</ebgpIfSensitive>.*', recv_xml)
                if re_find:
                    result["ebgp_if_sensitive"] = re_find
                    if re_find[0] != ebgp_if_sensitive:
                        need_cfg = True
                else:
                    need_cfg = True
        else:
            if "<data/>" in recv_xml:
                pass
            else:
                re_find = re.findall(
                    r'.*<ebgpIfSensitive>(.*)</ebgpIfSensitive>.*', recv_xml)
                if re_find:
                    result["ebgp_if_sensitive"] = re_find
                    if re_find[0] == ebgp_if_sensitive:
                        need_cfg = True
    else:
        pass
    # default_af_type: address-family selector for the instance.
    default_af_type = module.params['default_af_type']
    if default_af_type:
        if not vrf_name:
            module.fail_json(
                msg='Error: Please input vrf_name.')
        conf_str = CE_GET_BGP_INSTANCE_HEADER + \
            "<defaultAfType></defaultAfType>" + CE_GET_BGP_INSTANCE_TAIL
        recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
        if state == "present":
            if "<data/>" in recv_xml:
                need_cfg = True
            else:
                re_find = re.findall(
                    r'.*<defaultAfType>(.*)</defaultAfType>.*', recv_xml)
                if re_find:
                    result["default_af_type"] = re_find
                    if re_find[0] != default_af_type:
                        need_cfg = True
                else:
                    need_cfg = True
        else:
            if "<data/>" in recv_xml:
                pass
            else:
                re_find = re.findall(
                    r'.*<defaultAfType>(.*)</defaultAfType>.*', recv_xml)
                if re_find:
                    result["default_af_type"] = re_find
                    if re_find[0] == default_af_type:
                        need_cfg = True
    else:
        pass
    result["need_cfg"] = need_cfg
    return result
def get_bgp_enable(self, **kwargs):
    """Query (asNumber, bgpEnable) pairs from the device.

    Keyword Args:
        module: the AnsibleModule used for the NETCONF session.

    Returns:
        A list of (as_number, enable_flag) string tuples, or an empty
        list when the device reports no data.
    """
    reply = self.netconf_get_config(
        module=kwargs["module"], conf_str=CE_GET_BGP_ENABLE)
    if "<data/>" in reply:
        return []
    return re.findall(
        r'.*<asNumber>(.*)</asNumber>.*\s*<bgpEnable>(.*)</bgpEnable>.*',
        reply)
def merge_bgp_enable(self, **kwargs):
    """Enable (state=present) or disable the global BGP process.

    Builds a merge payload from state/as_number, applies it over
    NETCONF, and fails the module when the device does not acknowledge
    with <ok/>.

    Returns:
        The equivalent CLI command list, e.g. ["bgp 100"].
    """
    module = kwargs["module"]
    state = module.params['state']
    as_number = module.params['as_number']
    enable_flag = "true" if state == "present" else "false"
    pieces = [CE_MERGE_BGP_ENABLE_HEADER,
              "<bgpEnable>%s</bgpEnable>" % enable_flag]
    if as_number:
        pieces.append("<asNumber>%s</asNumber>" % as_number)
    pieces.append(CE_MERGE_BGP_ENABLE_TAIL)
    recv_xml = self.netconf_set_config(
        module=module, conf_str="".join(pieces))
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp enable failed.')
    verb = "bgp" if state == "present" else "undo bgp"
    return ["%s %s" % (verb, as_number)]
def merge_bgp_enable_other(self, **kwargs):
    """Merge the requested global BGP options onto the device.

    Accumulates one NETCONF merge payload from every option present in
    module.params (the 'no_use' sentinel means "not requested"), applies
    it in a single edit, and returns the list of equivalent CLI commands
    for change reporting.  Fails the module when the device does not
    acknowledge with <ok/>.
    """
    module = kwargs["module"]
    conf_str = CE_MERGE_BGP_ENABLE_HEADER
    cmds = []
    graceful_restart = module.params['graceful_restart']
    if graceful_restart != 'no_use':
        conf_str += "<gracefulRestart>%s</gracefulRestart>" % graceful_restart
        if graceful_restart == "true":
            cmd = "graceful-restart"
        else:
            cmd = "undo graceful-restart"
        cmds.append(cmd)
    time_wait_for_rib = module.params['time_wait_for_rib']
    if time_wait_for_rib:
        conf_str += "<timeWaitForRib>%s</timeWaitForRib>" % time_wait_for_rib
        cmd = "graceful-restart timer wait-for-rib %s" % time_wait_for_rib
        cmds.append(cmd)
    as_path_limit = module.params['as_path_limit']
    if as_path_limit:
        conf_str += "<asPathLimit>%s</asPathLimit>" % as_path_limit
        cmd = "as-path-limit %s" % as_path_limit
        cmds.append(cmd)
    check_first_as = module.params['check_first_as']
    if check_first_as != 'no_use':
        conf_str += "<checkFirstAs>%s</checkFirstAs>" % check_first_as
        if check_first_as == "true":
            cmd = "check-first-as"
        else:
            cmd = "undo check-first-as"
        cmds.append(cmd)
    confed_id_number = module.params['confed_id_number']
    if confed_id_number:
        conf_str += "<confedIdNumber>%s</confedIdNumber>" % confed_id_number
        cmd = "confederation id %s" % confed_id_number
        cmds.append(cmd)
    confed_nonstanded = module.params['confed_nonstanded']
    if confed_nonstanded != 'no_use':
        conf_str += "<confedNonstanded>%s</confedNonstanded>" % confed_nonstanded
        if confed_nonstanded == "true":
            cmd = "confederation nonstandard"
        else:
            cmd = "undo confederation nonstandard"
        cmds.append(cmd)
    bgp_rid_auto_sel = module.params['bgp_rid_auto_sel']
    if bgp_rid_auto_sel != 'no_use':
        conf_str += "<bgpRidAutoSel>%s</bgpRidAutoSel>" % bgp_rid_auto_sel
        if bgp_rid_auto_sel == "true":
            cmd = "router-id vpn-instance auto-select"
        else:
            cmd = "undo router-id"
        cmds.append(cmd)
    keep_all_routes = module.params['keep_all_routes']
    if keep_all_routes != 'no_use':
        conf_str += "<keepAllRoutes>%s</keepAllRoutes>" % keep_all_routes
        if keep_all_routes == "true":
            cmd = "keep-all-routes"
        else:
            cmd = "undo keep-all-routes"
        cmds.append(cmd)
    memory_limit = module.params['memory_limit']
    if memory_limit != 'no_use':
        conf_str += "<memoryLimit>%s</memoryLimit>" % memory_limit
        if memory_limit == "true":
            cmd = "prefix memory-limit"
        else:
            cmd = "undo prefix memory-limit"
        cmds.append(cmd)
    gr_peer_reset = module.params['gr_peer_reset']
    if gr_peer_reset != 'no_use':
        conf_str += "<grPeerReset>%s</grPeerReset>" % gr_peer_reset
        if gr_peer_reset == "true":
            cmd = "graceful-restart peer-reset"
        else:
            cmd = "undo graceful-restart peer-reset"
        cmds.append(cmd)
    is_shutdown = module.params['is_shutdown']
    if is_shutdown != 'no_use':
        conf_str += "<isShutdown>%s</isShutdown>" % is_shutdown
        if is_shutdown == "true":
            cmd = "shutdown"
        else:
            cmd = "undo shutdown"
        cmds.append(cmd)
    # The three interval values are merged together; the corresponding
    # CLI line interpolates all three (callers validate they co-occur).
    suppress_interval = module.params['suppress_interval']
    hold_interval = module.params['hold_interval']
    clear_interval = module.params['clear_interval']
    if suppress_interval:
        conf_str += "<suppressInterval>%s</suppressInterval>" % suppress_interval
        cmd = "nexthop recursive-lookup restrain suppress-interval %s hold-interval %s " \
            "clear-interval %s" % (suppress_interval, hold_interval, clear_interval)
        cmds.append(cmd)
    if hold_interval:
        conf_str += "<holdInterval>%s</holdInterval>" % hold_interval
    if clear_interval:
        conf_str += "<clearInterval>%s</clearInterval>" % clear_interval
    conf_str += CE_MERGE_BGP_ENABLE_TAIL
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp enable failed.')
    return cmds
def delete_bgp_enable_other(self, **kwargs):
    """Remove the requested global BGP options from the device.

    Implements "delete" by merging each numeric option back to a fixed
    value (600/255/60/120/600 — presumably the device defaults; verify
    against the CloudEngine data model) and by merging flag options with
    the requested value; returns the equivalent "undo ..." CLI commands.
    Fails the module when the device does not acknowledge with <ok/>.
    """
    module = kwargs["module"]
    conf_str = CE_MERGE_BGP_ENABLE_HEADER
    cmds = []
    graceful_restart = module.params['graceful_restart']
    if graceful_restart != 'no_use':
        conf_str += "<gracefulRestart>%s</gracefulRestart>" % graceful_restart
        if graceful_restart == "true":
            cmd = "graceful-restart"
        else:
            cmd = "undo graceful-restart"
        cmds.append(cmd)
    time_wait_for_rib = module.params['time_wait_for_rib']
    if time_wait_for_rib:
        # Reset to the fixed value 600 rather than deleting the leaf.
        conf_str += "<timeWaitForRib>600</timeWaitForRib>"
        cmd = "undo graceful-restart timer wait-for-rib"
        cmds.append(cmd)
    as_path_limit = module.params['as_path_limit']
    if as_path_limit:
        conf_str += "<asPathLimit>255</asPathLimit>"
        cmd = "undo as-path-limit"
        cmds.append(cmd)
    check_first_as = module.params['check_first_as']
    if check_first_as != 'no_use':
        conf_str += "<checkFirstAs>%s</checkFirstAs>" % check_first_as
        if check_first_as == "true":
            cmd = "check-first-as"
        else:
            cmd = "undo check-first-as"
        cmds.append(cmd)
    confed_id_number = module.params['confed_id_number']
    if confed_id_number:
        # Empty element clears the confederation id.
        conf_str += "<confedIdNumber></confedIdNumber>"
        cmd = "undo confederation id"
        cmds.append(cmd)
    confed_nonstanded = module.params['confed_nonstanded']
    if confed_nonstanded != 'no_use':
        conf_str += "<confedNonstanded>%s</confedNonstanded>" % confed_nonstanded
        if confed_nonstanded == "true":
            cmd = "confederation nonstandard"
        else:
            cmd = "undo confederation nonstandard"
        cmds.append(cmd)
    bgp_rid_auto_sel = module.params['bgp_rid_auto_sel']
    if bgp_rid_auto_sel != 'no_use':
        conf_str += "<bgpRidAutoSel>%s</bgpRidAutoSel>" % bgp_rid_auto_sel
        if bgp_rid_auto_sel == "true":
            cmd = "router-id vpn-instance auto-select"
        else:
            cmd = "undo router-id"
        cmds.append(cmd)
    keep_all_routes = module.params['keep_all_routes']
    if keep_all_routes != 'no_use':
        conf_str += "<keepAllRoutes>%s</keepAllRoutes>" % keep_all_routes
        if keep_all_routes == "true":
            cmd = "keep-all-routes"
        else:
            cmd = "undo keep-all-routes"
        cmds.append(cmd)
    memory_limit = module.params['memory_limit']
    if memory_limit != 'no_use':
        conf_str += "<memoryLimit>%s</memoryLimit>" % memory_limit
        if memory_limit == "true":
            cmd = "prefix memory-limit"
        else:
            cmd = "undo prefix memory-limit"
        cmds.append(cmd)
    gr_peer_reset = module.params['gr_peer_reset']
    if gr_peer_reset != 'no_use':
        conf_str += "<grPeerReset>%s</grPeerReset>" % gr_peer_reset
        if gr_peer_reset == "true":
            cmd = "graceful-restart peer-reset"
        else:
            cmd = "undo graceful-restart peer-reset"
        cmds.append(cmd)
    is_shutdown = module.params['is_shutdown']
    if is_shutdown != 'no_use':
        conf_str += "<isShutdown>%s</isShutdown>" % is_shutdown
        if is_shutdown == "true":
            cmd = "shutdown"
        else:
            cmd = "undo shutdown"
        cmds.append(cmd)
    suppress_interval = module.params['suppress_interval']
    hold_interval = module.params['hold_interval']
    clear_interval = module.params['clear_interval']
    if suppress_interval:
        conf_str += "<suppressInterval>60</suppressInterval>"
        cmd = "nexthop recursive-lookup restrain suppress-interval %s hold-interval %s " \
            "clear-interval %s" % (suppress_interval, hold_interval, clear_interval)
        cmds.append(cmd)
    if hold_interval:
        conf_str += "<holdInterval>120</holdInterval>"
    if clear_interval:
        conf_str += "<clearInterval>600</clearInterval>"
    conf_str += CE_MERGE_BGP_ENABLE_TAIL
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete bgp enable failed.')
    return cmds
def get_bgp_confed_peer_as(self, **kwargs):
    """List the confederation peer AS numbers configured on the device.

    Keyword Args:
        module: the AnsibleModule used for the NETCONF session.

    Returns:
        A list of confedPeerAsNum strings; empty when the device
        reports no data.
    """
    reply = self.netconf_get_config(
        module=kwargs["module"], conf_str=CE_GET_BGP_CONFED_PEER_AS)
    if "<data/>" in reply:
        return []
    return re.findall(
        r'.*<confedPeerAsNum>(.*)</confedPeerAsNum>.*', reply)
def merge_bgp_confed_peer_as(self, **kwargs):
    """Merge a confederation peer AS number onto the device.

    Fails the module unless the device acknowledges with <ok/>.

    Returns:
        The equivalent CLI command list.
    """
    module = kwargs["module"]
    peer_as = module.params['confed_peer_as_num']
    reply = self.netconf_set_config(
        module=module, conf_str=CE_MERGE_BGP_CONFED_PEER_AS % peer_as)
    if "<ok/>" not in reply:
        module.fail_json(msg='Error: Merge bgp confed peer as failed.')
    return ["confederation peer-as %s" % peer_as]
def create_bgp_confed_peer_as(self, **kwargs):
    """Create a confederation peer AS number on the device.

    Fails the module unless the device acknowledges with <ok/>.

    Returns:
        The equivalent CLI command list.
    """
    module = kwargs["module"]
    peer_as = module.params['confed_peer_as_num']
    reply = self.netconf_set_config(
        module=module, conf_str=CE_CREATE_BGP_CONFED_PEER_AS % peer_as)
    if "<ok/>" not in reply:
        module.fail_json(msg='Error: Create bgp confed peer as failed.')
    return ["confederation peer-as %s" % peer_as]
def delete_bgp_confed_peer_as(self, **kwargs):
    """Delete a confederation peer AS number from the device.

    Fails the module unless the device acknowledges with <ok/>.

    Returns:
        The equivalent "undo ..." CLI command list.
    """
    module = kwargs["module"]
    peer_as = module.params['confed_peer_as_num']
    reply = self.netconf_set_config(
        module=module, conf_str=CE_DELETE_BGP_CONFED_PEER_AS % peer_as)
    if "<ok/>" not in reply:
        module.fail_json(msg='Error: Delete bgp confed peer as failed.')
    return ["undo confederation peer-as %s" % peer_as]
def get_bgp_instance(self, **kwargs):
    """List the vrfName of every BGP instance configured on the device.

    Keyword Args:
        module: the AnsibleModule used for the NETCONF session.

    Returns:
        A list of vrfName strings; empty when the device reports no data.
    """
    reply = self.netconf_get_config(
        module=kwargs["module"], conf_str=CE_GET_BGP_INSTANCE)
    if "<data/>" in reply:
        return []
    return re.findall(r'.*<vrfName>(.*)</vrfName>.*', reply)
def merge_bgp_instance(self, **kwargs):
    """Merge (create or update) the BGP instance named by vrf_name.

    Fails the module unless the device acknowledges with <ok/>;
    returns nothing.
    """
    module = kwargs["module"]
    xml_body = (CE_MERGE_BGP_INSTANCE_HEADER
                + "<vrfName>%s</vrfName>" % module.params['vrf_name']
                + CE_MERGE_BGP_INSTANCE_TAIL)
    reply = self.netconf_set_config(module=module, conf_str=xml_body)
    if "<ok/>" not in reply:
        module.fail_json(msg='Error: Merge bgp instance failed.')
def create_bgp_instance(self, **kwargs):
    """Create the BGP instance named by vrf_name.

    Fails the module unless the device acknowledges with <ok/>.

    Returns:
        The equivalent CLI command list; empty for the public VRF,
        which has no vpn-instance CLI form.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    xml_body = CE_CREATE_BGP_INSTANCE_HEADER
    if vrf_name:
        xml_body += "<vrfName>%s</vrfName>" % vrf_name
    xml_body += CE_CREATE_BGP_INSTANCE_TAIL
    reply = self.netconf_set_config(module=module, conf_str=xml_body)
    if "<ok/>" not in reply:
        module.fail_json(msg='Error: Create bgp instance failed.')
    if vrf_name == "_public_":
        return []
    return ["ipv4-family vpn-instance %s" % vrf_name]
def delete_bgp_instance(self, **kwargs):
    """Delete the BGP instance named by vrf_name.

    Fails the module unless the device acknowledges with <ok/>.

    Returns:
        The equivalent "undo ..." CLI command list; empty for the
        public VRF, which has no vpn-instance CLI form.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    xml_body = CE_DELETE_BGP_INSTANCE_HEADER
    if vrf_name:
        xml_body += "<vrfName>%s</vrfName>" % vrf_name
    xml_body += CE_DELETE_BGP_INSTANCE_TAIL
    reply = self.netconf_set_config(module=module, conf_str=xml_body)
    if "<ok/>" not in reply:
        module.fail_json(msg='Error: Delete bgp instance failed.')
    if vrf_name == "_public_":
        return []
    return ["undo ipv4-family vpn-instance %s" % vrf_name]
def merge_bgp_instance_other(self, **kwargs):
    """Merge the requested per-VRF BGP options onto the device.

    Accumulates a single NETCONF merge payload for vrf_name from every
    option present in module.params ('no_use' means "not requested"),
    applies it in one edit, and returns the equivalent CLI commands.
    Fails the module when the device does not acknowledge with <ok/>.
    """
    module = kwargs["module"]
    conf_str = CE_MERGE_BGP_INSTANCE_HEADER
    vrf_name = module.params['vrf_name']
    conf_str += "<vrfName>%s</vrfName>" % vrf_name
    cmds = []
    vrf_rid_auto_sel = module.params['vrf_rid_auto_sel']
    if vrf_rid_auto_sel != 'no_use':
        conf_str += "<vrfRidAutoSel>%s</vrfRidAutoSel>" % vrf_rid_auto_sel
        if vrf_rid_auto_sel == "true":
            cmd = "router-id vpn-instance auto-select"
        else:
            cmd = "undo router-id vpn-instance auto-select"
        cmds.append(cmd)
    router_id = module.params['router_id']
    if router_id:
        conf_str += "<routerId>%s</routerId>" % router_id
        cmd = "router-id %s" % router_id
        cmds.append(cmd)
    keepalive_time = module.params['keepalive_time']
    if keepalive_time:
        conf_str += "<keepaliveTime>%s</keepaliveTime>" % keepalive_time
        cmd = "timer keepalive %s" % keepalive_time
        cmds.append(cmd)
    hold_time = module.params['hold_time']
    if hold_time:
        conf_str += "<holdTime>%s</holdTime>" % hold_time
        cmd = "timer hold %s" % hold_time
        cmds.append(cmd)
    min_hold_time = module.params['min_hold_time']
    if min_hold_time:
        conf_str += "<minHoldTime>%s</minHoldTime>" % min_hold_time
        cmd = "timer min-holdtime %s" % min_hold_time
        cmds.append(cmd)
    conn_retry_time = module.params['conn_retry_time']
    if conn_retry_time:
        conf_str += "<connRetryTime>%s</connRetryTime>" % conn_retry_time
        cmd = "timer connect-retry %s" % conn_retry_time
        cmds.append(cmd)
    ebgp_if_sensitive = module.params['ebgp_if_sensitive']
    if ebgp_if_sensitive != 'no_use':
        conf_str += "<ebgpIfSensitive>%s</ebgpIfSensitive>" % ebgp_if_sensitive
        if ebgp_if_sensitive == "true":
            cmd = "ebgp-interface-sensitive"
        else:
            cmd = "undo ebgp-interface-sensitive"
        cmds.append(cmd)
    # default_af_type picks the address-family CLI view; the public VRF
    # has no vpn-instance CLI form, so no command is emitted for it.
    default_af_type = module.params['default_af_type']
    if default_af_type:
        conf_str += "<defaultAfType>%s</defaultAfType>" % default_af_type
        if vrf_name != "_public_":
            if default_af_type == "ipv6uni":
                cmd = "ipv6-family vpn-instance %s" % vrf_name
                cmds.append(cmd)
            else:
                cmd = "ipv4-family vpn-instance %s" % vrf_name
                cmds.append(cmd)
    else:
        if vrf_name != "_public_":
            cmd = "ipv4-family vpn-instance %s" % vrf_name
            cmds.append(cmd)
    conf_str += CE_MERGE_BGP_INSTANCE_TAIL
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp instance other failed.')
    return cmds
def delete_bgp_instance_other_comm(self, **kwargs):
""" delete_bgp_instance_other_comm """
module = kwargs["module"]
conf_str = CE_DELETE_BGP_INSTANCE_HEADER
vrf_name = module.params['vrf_name']
conf_str += "<vrfName>%s</vrfName>" % vrf_name
cmds = []
router_id = module.params['router_id']
if router_id:
conf_str += "<routerId>%s</routerId>" % router_id
cmd = "undo router-id"
cmds.append(cmd)
vrf_rid_auto_sel = module.params['vrf_rid_auto_sel']
if vrf_rid_auto_sel != 'no_use':
conf_str += "<vrfRidAutoSel>%s</vrfRidAutoSel>" % vrf_rid_auto_sel
cmd = "undo router-id vpn-instance auto-select"
cmds.append(cmd)
keepalive_time = module.params['keepalive_time']
if keepalive_time:
conf_str += "<keepaliveTime>%s</keepaliveTime>" % keepalive_time
cmd = "undo timer keepalive"
cmds.append(cmd)
hold_time = module.params['hold_time']
if hold_time:
conf_str += "<holdTime>%s</holdTime>" % hold_time
cmd = "undo timer hold"
cmds.append(cmd)
min_hold_time = module.params['min_hold_time']
if min_hold_time:
conf_str += "<minHoldTime>%s</minHoldTime>" % min_hold_time
cmd = "undo timer min-holdtime"
cmds.append(cmd)
conn_retry_time = module.params['conn_retry_time']
if conn_retry_time:
conf_str += "<connRetryTime>%s</connRetryTime>" % conn_retry_time
cmd = "undo timer connect-retry"
cmds.append(cmd)
ebgp_if_sensitive = module.params['ebgp_if_sensitive']
if ebgp_if_sensitive != 'no_use':
conf_str += "<ebgpIfSensitive>%s</ebgpIfSensitive>" % ebgp_if_sensitive
cmd = "undo ebgp-interface-sensitive"
cmds.append(cmd)
default_af_type = module.params['default_af_type']
if default_af_type:
conf_str += "<defaultAfType>%s</defaultAfType>" % default_af_type
if vrf_name != "_public_":
if default_af_type == "ipv6uni":
cmd = "undo ipv6-family vpn-instance %s" % vrf_name
cmds.append(cmd)
else:
cmd = "undo ipv4-family vpn-instance %s" % vrf_name
cmds.append(cmd)
else:
if vrf_name != "_public_":
cmd = "undo ipv4-family vpn-instance %s" % vrf_name
cmds.append(cmd)
conf_str += CE_DELETE_BGP_INSTANCE_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Delete common vpn bgp instance other args failed.')
return cmds
def delete_instance_other_public(self, **kwargs):
""" delete_instance_other_public """
module = kwargs["module"]
conf_str = CE_MERGE_BGP_INSTANCE_HEADER
vrf_name = module.params['vrf_name']
conf_str += "<vrfName>%s</vrfName>" % vrf_name
cmds = []
router_id = module.params['router_id']
if router_id:
conf_str += "<routerId></routerId>"
cmd = "undo router-id"
cmds.append(cmd)
vrf_rid_auto_sel = module.params['vrf_rid_auto_sel']
if vrf_rid_auto_sel != 'no_use':
conf_str += "<vrfRidAutoSel>%s</vrfRidAutoSel>" % vrf_rid_auto_sel
cmd = "undo router-id vpn-instance auto-select"
cmds.append(cmd)
keepalive_time = module.params['keepalive_time']
if keepalive_time:
conf_str += "<keepaliveTime>%s</keepaliveTime>" % "60"
cmd = "undo timer keepalive"
cmds.append(cmd)
hold_time = module.params['hold_time']
if hold_time:
conf_str += "<holdTime>%s</holdTime>" % "180"
cmd = "undo timer hold"
cmds.append(cmd)
min_hold_time = module.params['min_hold_time']
if min_hold_time:
conf_str += "<minHoldTime>%s</minHoldTime>" % "0"
cmd = "undo timer min-holdtime"
cmds.append(cmd)
conn_retry_time = module.params['conn_retry_time']
if conn_retry_time:
conf_str += "<connRetryTime>%s</connRetryTime>" % "32"
cmd = "undo timer connect-retry"
cmds.append(cmd)
ebgp_if_sensitive = module.params['ebgp_if_sensitive']
if ebgp_if_sensitive != 'no_use':
conf_str += "<ebgpIfSensitive>%s</ebgpIfSensitive>" % "true"
cmd = "undo ebgp-interface-sensitive"
cmds.append(cmd)
default_af_type = module.params['default_af_type']
if default_af_type:
conf_str += "<defaultAfType>%s</defaultAfType>" % "ipv4uni"
if vrf_name != "_public_":
if default_af_type == "ipv6uni":
cmd = "undo ipv6-family vpn-instance %s" % vrf_name
cmds.append(cmd)
else:
cmd = "undo ipv4-family vpn-instance %s" % vrf_name
cmds.append(cmd)
else:
if vrf_name != "_public_":
cmd = "undo ipv4-family vpn-instance %s" % vrf_name
cmds.append(cmd)
conf_str += CE_MERGE_BGP_INSTANCE_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Delete default vpn bgp instance other args failed.')
return cmds
def main():
    """Entry point of the CloudEngine BGP Ansible module.

    Parses module arguments, computes the proposed/existing/end_state views
    of the device BGP configuration, applies the requested changes through
    the Bgp helper object, and exits with the standard Ansible result dict.
    """
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        as_number=dict(type='str'),
        # 'no_use' is a sentinel meaning "parameter not supplied" for the
        # tri-state (true/false) options below.
        graceful_restart=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        time_wait_for_rib=dict(type='str'),
        as_path_limit=dict(type='str'),
        check_first_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        confed_id_number=dict(type='str'),
        confed_nonstanded=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        bgp_rid_auto_sel=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        keep_all_routes=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        memory_limit=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        gr_peer_reset=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        is_shutdown=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        suppress_interval=dict(type='str'),
        hold_interval=dict(type='str'),
        clear_interval=dict(type='str'),
        confed_peer_as_num=dict(type='str'),
        vrf_name=dict(type='str'),
        vrf_rid_auto_sel=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        router_id=dict(type='str'),
        keepalive_time=dict(type='str'),
        hold_time=dict(type='str'),
        min_hold_time=dict(type='str'),
        conn_retry_time=dict(type='str'),
        ebgp_if_sensitive=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        default_af_type=dict(type='str', choices=['ipv4uni', 'ipv6uni'])
    )

    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Standard Ansible result scaffolding.
    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []

    state = module.params['state']
    as_number = module.params['as_number']
    graceful_restart = module.params['graceful_restart']
    time_wait_for_rib = module.params['time_wait_for_rib']
    as_path_limit = module.params['as_path_limit']
    check_first_as = module.params['check_first_as']
    confed_id_number = module.params['confed_id_number']
    confed_nonstanded = module.params['confed_nonstanded']
    bgp_rid_auto_sel = module.params['bgp_rid_auto_sel']
    keep_all_routes = module.params['keep_all_routes']
    memory_limit = module.params['memory_limit']
    gr_peer_reset = module.params['gr_peer_reset']
    is_shutdown = module.params['is_shutdown']
    suppress_interval = module.params['suppress_interval']
    hold_interval = module.params['hold_interval']
    clear_interval = module.params['clear_interval']
    confed_peer_as_num = module.params['confed_peer_as_num']
    router_id = module.params['router_id']
    vrf_name = module.params['vrf_name']
    vrf_rid_auto_sel = module.params['vrf_rid_auto_sel']
    keepalive_time = module.params['keepalive_time']
    hold_time = module.params['hold_time']
    min_hold_time = module.params['min_hold_time']
    conn_retry_time = module.params['conn_retry_time']
    ebgp_if_sensitive = module.params['ebgp_if_sensitive']
    default_af_type = module.params['default_af_type']

    ce_bgp_obj = Bgp()

    # NOTE(review): a freshly constructed object is always truthy unless
    # Bgp defines __bool__/__len__ — this guard likely never fires; confirm.
    if not ce_bgp_obj:
        module.fail_json(msg='Error: Init module failed.')

    # get proposed: record only the parameters the user actually supplied
    # ('no_use' / None / '' are treated as "not requested").
    proposed["state"] = state
    if as_number:
        proposed["as_number"] = as_number
    if graceful_restart != 'no_use':
        proposed["graceful_restart"] = graceful_restart
    if time_wait_for_rib:
        proposed["time_wait_for_rib"] = time_wait_for_rib
    if as_path_limit:
        proposed["as_path_limit"] = as_path_limit
    if check_first_as != 'no_use':
        proposed["check_first_as"] = check_first_as
    if confed_id_number:
        proposed["confed_id_number"] = confed_id_number
    if confed_nonstanded != 'no_use':
        proposed["confed_nonstanded"] = confed_nonstanded
    if bgp_rid_auto_sel != 'no_use':
        proposed["bgp_rid_auto_sel"] = bgp_rid_auto_sel
    if keep_all_routes != 'no_use':
        proposed["keep_all_routes"] = keep_all_routes
    if memory_limit != 'no_use':
        proposed["memory_limit"] = memory_limit
    if gr_peer_reset != 'no_use':
        proposed["gr_peer_reset"] = gr_peer_reset
    if is_shutdown != 'no_use':
        proposed["is_shutdown"] = is_shutdown
    if suppress_interval:
        proposed["suppress_interval"] = suppress_interval
    if hold_interval:
        proposed["hold_interval"] = hold_interval
    if clear_interval:
        proposed["clear_interval"] = clear_interval
    if confed_peer_as_num:
        proposed["confed_peer_as_num"] = confed_peer_as_num
    if router_id:
        proposed["router_id"] = router_id
    if vrf_name:
        proposed["vrf_name"] = vrf_name
    if vrf_rid_auto_sel != 'no_use':
        proposed["vrf_rid_auto_sel"] = vrf_rid_auto_sel
    if keepalive_time:
        proposed["keepalive_time"] = keepalive_time
    if hold_time:
        proposed["hold_time"] = hold_time
    if min_hold_time:
        proposed["min_hold_time"] = min_hold_time
    if conn_retry_time:
        proposed["conn_retry_time"] = conn_retry_time
    if ebgp_if_sensitive != 'no_use':
        proposed["ebgp_if_sensitive"] = ebgp_if_sensitive
    if default_af_type:
        proposed["default_af_type"] = default_af_type

    # Decide up-front which configuration areas need to be touched.
    need_bgp_enable = check_bgp_enable_args(module=module)
    need_bgp_enable_other_rst = ce_bgp_obj.check_bgp_enable_other_args(
        module=module)
    need_bgp_confed = check_bgp_confed_args(module=module)
    need_bgp_instance = ce_bgp_obj.check_bgp_instance_args(module=module)
    need_bgp_instance_other_rst = ce_bgp_obj.check_bgp_instance_other_args(
        module=module)

    # bgp enable/disable
    if need_bgp_enable:
        bgp_enable_exist = ce_bgp_obj.get_bgp_enable(module=module)
        existing["bgp enable"] = bgp_enable_exist

        # get_bgp_enable returns rows of (as_number, enabled); only the
        # first row is inspected here.
        asnumber_exist = bgp_enable_exist[0][0]
        bgpenable_exist = bgp_enable_exist[0][1]

        if state == "present":
            bgp_enable_new = (as_number, "true")

            if bgp_enable_new in bgp_enable_exist:
                # Already enabled with the requested AS: nothing to do.
                pass
            elif bgpenable_exist == "true" and asnumber_exist != as_number:
                # Only one AS may run at a time on the device.
                module.fail_json(
                    msg='Error: BGP is already running. The AS is %s.' % asnumber_exist)
            else:
                cmd = ce_bgp_obj.merge_bgp_enable(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

        else:
            # state == 'absent': disable BGP only when nothing else still
            # needs configuration and the AS number matches.
            if need_bgp_enable_other_rst["need_cfg"] or need_bgp_confed or need_bgp_instance_other_rst["need_cfg"]:
                pass
            elif bgpenable_exist == "false":
                pass
            elif bgpenable_exist == "true" and asnumber_exist == as_number:
                cmd = ce_bgp_obj.merge_bgp_enable(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

            else:
                module.fail_json(
                    msg='Error: BGP is already running. The AS is %s.' % asnumber_exist)

        bgp_enable_end = ce_bgp_obj.get_bgp_enable(module=module)
        end_state["bgp enable"] = bgp_enable_end

    # bgp enable/disable other args
    exist_tmp = dict()
    for item in need_bgp_enable_other_rst:
        if item != "need_cfg":
            exist_tmp[item] = need_bgp_enable_other_rst[item]
    if exist_tmp:
        existing["bgp enable other"] = exist_tmp

    if need_bgp_enable_other_rst["need_cfg"]:
        if state == "present":
            cmd = ce_bgp_obj.merge_bgp_enable_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)
        else:
            cmd = ce_bgp_obj.delete_bgp_enable_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)

    # Re-check to capture the post-change state.
    need_bgp_enable_other_rst = ce_bgp_obj.check_bgp_enable_other_args(
        module=module)

    end_tmp = dict()
    for item in need_bgp_enable_other_rst:
        if item != "need_cfg":
            end_tmp[item] = need_bgp_enable_other_rst[item]
    if end_tmp:
        end_state["bgp enable other"] = end_tmp

    # bgp confederation peer as
    if need_bgp_confed:
        confed_exist = ce_bgp_obj.get_bgp_confed_peer_as(module=module)
        existing["confederation peer as"] = confed_exist
        # NOTE(review): parentheses here do not make a tuple — confed_new is
        # just the string confed_peer_as_num; confirm intended.
        confed_new = (confed_peer_as_num)

        if state == "present":
            if len(confed_exist) == 0:
                cmd = ce_bgp_obj.create_bgp_confed_peer_as(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

            elif confed_new not in confed_exist:
                cmd = ce_bgp_obj.merge_bgp_confed_peer_as(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

        else:
            if len(confed_exist) == 0:
                pass

            elif confed_new not in confed_exist:
                pass

            else:
                cmd = ce_bgp_obj.delete_bgp_confed_peer_as(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

        confed_end = ce_bgp_obj.get_bgp_confed_peer_as(module=module)
        end_state["confederation peer as"] = confed_end

    # bgp instance
    router_id_exist = ce_bgp_obj.get_bgp_instance(module=module)
    existing["bgp instance"] = router_id_exist
    if need_bgp_instance:
        # NOTE(review): parentheses do not make a tuple — router_id_new is
        # just the vrf_name string; confirm intended.
        router_id_new = (vrf_name)

        if state == "present":
            if len(router_id_exist) == 0:
                cmd = ce_bgp_obj.create_bgp_instance(module=module)
                changed = True
                # NOTE(review): appends the whole command list as a single
                # element, unlike the item-by-item loops elsewhere — confirm.
                updates.append(cmd)
            elif router_id_new not in router_id_exist:
                # NOTE(review): return value (commands) is discarded here,
                # so this change is not reflected in `updates` — confirm.
                ce_bgp_obj.merge_bgp_instance(module=module)
                changed = True

        else:
            # Only delete the instance when no per-instance "other" settings
            # remain to be removed; the public instance is never deleted.
            if not need_bgp_instance_other_rst["need_cfg"]:
                if vrf_name != "_public_":
                    if len(router_id_exist) == 0:
                        pass
                    elif router_id_new not in router_id_exist:
                        pass
                    else:
                        cmd = ce_bgp_obj.delete_bgp_instance(module=module)
                        changed = True
                        for item in cmd:
                            updates.append(item)

    router_id_end = ce_bgp_obj.get_bgp_instance(module=module)
    end_state["bgp instance"] = router_id_end

    # bgp instance other
    exist_tmp = dict()
    for item in need_bgp_instance_other_rst:
        if item != "need_cfg":
            exist_tmp[item] = need_bgp_instance_other_rst[item]
    if exist_tmp:
        existing["bgp instance other"] = exist_tmp

    if need_bgp_instance_other_rst["need_cfg"]:
        if state == "present":
            cmd = ce_bgp_obj.merge_bgp_instance_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)

        else:
            # The public instance gets a "reset to defaults" merge; any
            # other VRF gets a real delete.
            if vrf_name == "_public_":
                cmd = ce_bgp_obj.delete_instance_other_public(
                    module=module)
                changed = True
                for item in cmd:
                    updates.append(item)
            else:
                cmd = ce_bgp_obj.delete_bgp_instance_other_comm(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

    # Re-check to capture the post-change state.
    need_bgp_instance_other_rst = ce_bgp_obj.check_bgp_instance_other_args(
        module=module)

    end_tmp = dict()
    for item in need_bgp_instance_other_rst:
        if item != "need_cfg":
            end_tmp[item] = need_bgp_instance_other_rst[item]
    if end_tmp:
        end_state["bgp instance other"] = end_tmp

    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates

    module.exit_json(**results)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
qubesuser/qubes-core-admin | qubespolicy/tests/__init__.py | 1 | 43412 | # -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2017 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
import os
import socket
import unittest.mock
import shutil
import qubes.tests
import qubespolicy
# Scratch directory for policy files created by these tests.
tmp_policy_dir = '/tmp/policy'

# Static system description passed to the qubespolicy functions under test.
# Each entry mimics one domain as the policy engine sees it: its tags, VM
# type, default DisposableVM template, and whether it may serve as a
# DisposableVM template itself. The names encode the scenario they exercise
# (e.g. 'test-invalid-dvm' points its default_dispvm at a VM that is not a
# DispVM template; 'test-no-dvm' has no default DispVM at all).
system_info = {
    'domains': {
        'dom0': {
            'tags': ['dom0-tag'],
            'type': 'AdminVM',
            'default_dispvm': 'default-dvm',
            'template_for_dispvms': False,
        },
        'test-vm1': {
            'tags': ['tag1', 'tag2'],
            'type': 'AppVM',
            'default_dispvm': 'default-dvm',
            'template_for_dispvms': False,
        },
        'test-vm2': {
            'tags': ['tag2'],
            'type': 'AppVM',
            'default_dispvm': 'default-dvm',
            'template_for_dispvms': False,
        },
        'test-vm3': {
            'tags': ['tag3'],
            'type': 'AppVM',
            'default_dispvm': 'default-dvm',
            'template_for_dispvms': True,
        },
        'default-dvm': {
            'tags': [],
            'type': 'AppVM',
            'default_dispvm': 'default-dvm',
            'template_for_dispvms': True,
        },
        'test-invalid-dvm': {
            'tags': ['tag1', 'tag2'],
            'type': 'AppVM',
            'default_dispvm': 'test-vm1',
            'template_for_dispvms': False,
        },
        'test-no-dvm': {
            'tags': ['tag1', 'tag2'],
            'type': 'AppVM',
            'default_dispvm': None,
            'template_for_dispvms': False,
        },
        'test-template': {
            'tags': ['tag1', 'tag2'],
            'type': 'TemplateVM',
            'default_dispvm': 'default-dvm',
            'template_for_dispvms': False,
        },
        'test-standalone': {
            'tags': ['tag1', 'tag2'],
            'type': 'StandaloneVM',
            'default_dispvm': 'default-dvm',
            'template_for_dispvms': False,
        },
    }
}
class TC_00_PolicyRule(qubes.tests.QubesTestCase):
    """Unit tests for qubespolicy rule parsing, validation and matching.

    All tests run against the static ``system_info`` fixture defined above;
    no real qubesd connection is made.
    """

    def test_000_verify_target_value(self):
        # Valid call targets: concrete VMs, $dispvm / $dispvm:<template>
        # (template must allow DispVMs), and $adminvm.
        self.assertTrue(
            qubespolicy.verify_target_value(system_info, 'test-vm1'))
        self.assertTrue(
            qubespolicy.verify_target_value(system_info, 'default-dvm'))
        self.assertTrue(
            qubespolicy.verify_target_value(system_info, '$dispvm'))
        self.assertTrue(
            qubespolicy.verify_target_value(system_info, '$dispvm:default-dvm'))
        self.assertTrue(
            qubespolicy.verify_target_value(system_info, 'test-template'))
        self.assertTrue(
            qubespolicy.verify_target_value(system_info, 'test-standalone'))
        self.assertTrue(
            qubespolicy.verify_target_value(system_info, '$adminvm'))
        # Invalid targets: unknown VMs, DispVMs based on non-DispVM-templates,
        # empty string, and policy-file-only keywords ($default, $anyvm, $tag).
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, 'no-such-vm'))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info,
                '$dispvm:test-invalid-dvm'))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, '$dispvm:test-vm1'))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, ''))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, '$default'))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, '$anyvm'))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, '$tag:tag1'))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, '$dispvm:$tag:tag1'))
        self.assertFalse(
            qubespolicy.verify_target_value(system_info, '$invalid'))

    def test_010_verify_special_value(self):
        # $tag:/$type:/$adminvm are valid on either side; $dispvm:... forms
        # are valid only as targets (for_target=True).
        self.assertTrue(qubespolicy.verify_special_value('$tag:tag',
            for_target=False))
        self.assertTrue(qubespolicy.verify_special_value('$tag:other-tag',
            for_target=False))
        self.assertTrue(qubespolicy.verify_special_value('$type:AppVM',
            for_target=False))
        self.assertTrue(qubespolicy.verify_special_value('$adminvm',
            for_target=False))
        self.assertTrue(qubespolicy.verify_special_value('$dispvm:some-vm',
            for_target=True))
        self.assertTrue(qubespolicy.verify_special_value('$dispvm:$tag:tag1',
            for_target=True))
        self.assertFalse(qubespolicy.verify_special_value('$default',
            for_target=False))
        self.assertFalse(qubespolicy.verify_special_value('$dispvm',
            for_target=False))
        self.assertFalse(qubespolicy.verify_special_value('$dispvm:some-vm',
            for_target=False))
        self.assertFalse(qubespolicy.verify_special_value('$dispvm:$tag:tag1',
            for_target=False))
        self.assertFalse(qubespolicy.verify_special_value('$invalid',
            for_target=False))
        self.assertFalse(qubespolicy.verify_special_value('vm-name',
            for_target=False))
        # A tag/type prefix without a value is rejected.
        self.assertFalse(qubespolicy.verify_special_value('$tag:',
            for_target=False))
        self.assertFalse(qubespolicy.verify_special_value('$type:',
            for_target=False))

    def test_020_line_simple(self):
        # Bare 'ask' action: no target/user overrides, no default target.
        line = qubespolicy.PolicyRule('$anyvm $anyvm ask', 'filename', 12)
        self.assertEqual(line.filename, 'filename')
        self.assertEqual(line.lineno, 12)
        self.assertEqual(line.action, qubespolicy.Action.ask)
        self.assertEqual(line.source, '$anyvm')
        self.assertEqual(line.target, '$anyvm')
        self.assertEqual(line.full_action, 'ask')
        self.assertIsNone(line.override_target)
        self.assertIsNone(line.override_user)
        self.assertIsNone(line.default_target)

    def test_021_line_simple(self):
        # also check spaces in action field
        line = qubespolicy.PolicyRule(
            '$tag:tag1 $type:AppVM ask, target=test-vm2, user=user',
            'filename', 12)
        self.assertEqual(line.filename, 'filename')
        self.assertEqual(line.lineno, 12)
        self.assertEqual(line.action, qubespolicy.Action.ask)
        self.assertEqual(line.source, '$tag:tag1')
        self.assertEqual(line.target, '$type:AppVM')
        self.assertEqual(line.full_action, 'ask, target=test-vm2, user=user')
        self.assertEqual(line.override_target, 'test-vm2')
        self.assertEqual(line.override_user, 'user')
        self.assertIsNone(line.default_target)

    def test_022_line_simple(self):
        # allow with a $dispvm:<template> override target.
        line = qubespolicy.PolicyRule(
            '$anyvm $default allow,target=$dispvm:test-vm2',
            'filename', 12)
        self.assertEqual(line.filename, 'filename')
        self.assertEqual(line.lineno, 12)
        self.assertEqual(line.action, qubespolicy.Action.allow)
        self.assertEqual(line.source, '$anyvm')
        self.assertEqual(line.target, '$default')
        self.assertEqual(line.full_action, 'allow,target=$dispvm:test-vm2')
        self.assertEqual(line.override_target, '$dispvm:test-vm2')
        self.assertIsNone(line.override_user)
        self.assertIsNone(line.default_target)

    def test_023_line_simple(self):
        # ask with a default_target preselected in the confirmation dialog.
        line = qubespolicy.PolicyRule(
            '$anyvm $default ask,default_target=test-vm1',
            'filename', 12)
        self.assertEqual(line.filename, 'filename')
        self.assertEqual(line.lineno, 12)
        self.assertEqual(line.action, qubespolicy.Action.ask)
        self.assertEqual(line.source, '$anyvm')
        self.assertEqual(line.target, '$default')
        self.assertEqual(line.full_action, 'ask,default_target=test-vm1')
        self.assertIsNone(line.override_target)
        self.assertIsNone(line.override_user)
        self.assertEqual(line.default_target, 'test-vm1')

    def test_024_line_simple(self):
        # $adminvm is accepted both as rule target and as default_target.
        line = qubespolicy.PolicyRule(
            '$anyvm $adminvm ask,default_target=$adminvm',
            'filename', 12)
        self.assertEqual(line.filename, 'filename')
        self.assertEqual(line.lineno, 12)
        self.assertEqual(line.action, qubespolicy.Action.ask)
        self.assertEqual(line.source, '$anyvm')
        self.assertEqual(line.target, '$adminvm')
        self.assertEqual(line.full_action, 'ask,default_target=$adminvm')
        self.assertIsNone(line.override_target)
        self.assertIsNone(line.override_user)
        self.assertEqual(line.default_target, '$adminvm')

    def test_030_line_invalid(self):
        # Each entry documents why it must be rejected by the parser.
        invalid_lines = [
            '$dispvm $default allow',  # $dispvm can't be a source
            '$default $default allow',  # $default can't be a source
            '$anyvm $default allow,target=$dispvm:$tag:tag1',  # $dispvm:$tag
            #  as override target
            '$anyvm $default allow,target=$tag:tag1',  # $tag as override target
            '$anyvm $default deny,target=test-vm1',  # target= used with deny
            '$anyvm $anyvm deny,default_target=test-vm1',  # default_target=
            # with deny
            '$anyvm $anyvm deny,user=user',  # user= with deny
            '$anyvm $anyvm invalid',  # invalid action
            '$anyvm $anyvm allow,invalid=xx',  # invalid option
            '$anyvm $anyvm',  # missing action
            '$anyvm $anyvm allow,default_target=test-vm1',  # default_target=
            # with allow
            '$invalid $anyvm allow',  # invalid source
            '$anyvm $invalid deny',  # invalid target
            '',  # empty line
            '$anyvm $anyvm allow extra',  # trailing words
            '$anyvm $default allow',  # $default allow without target=
        ]
        for line in invalid_lines:
            with self.subTest(line):
                with self.assertRaises(qubespolicy.PolicySyntaxError):
                    qubespolicy.PolicyRule(line, 'filename', 12)

    def test_040_match_single(self):
        is_match_single = qubespolicy.PolicyRule.is_match_single
        self.assertTrue(is_match_single(system_info, '$anyvm', 'test-vm1'))
        self.assertTrue(is_match_single(system_info, '$anyvm', '$default'))
        self.assertTrue(is_match_single(system_info, '$anyvm', ''))
        self.assertTrue(is_match_single(system_info, '$default', ''))
        self.assertTrue(is_match_single(system_info, '$default', '$default'))
        self.assertTrue(is_match_single(system_info, '$tag:tag1', 'test-vm1'))
        self.assertTrue(is_match_single(system_info, '$type:AppVM', 'test-vm1'))
        self.assertTrue(is_match_single(system_info,
            '$type:TemplateVM', 'test-template'))
        self.assertTrue(is_match_single(system_info, '$anyvm', '$dispvm'))
        self.assertTrue(is_match_single(system_info,
            '$anyvm', '$dispvm:default-dvm'))
        self.assertTrue(is_match_single(system_info, '$dispvm', '$dispvm'))
        self.assertTrue(is_match_single(system_info,
            '$dispvm:$tag:tag3', '$dispvm:test-vm3'))
        self.assertTrue(is_match_single(system_info, '$adminvm', '$adminvm'))
        self.assertTrue(is_match_single(system_info, '$adminvm', 'dom0'))
        self.assertTrue(is_match_single(system_info, 'dom0', '$adminvm'))
        self.assertTrue(is_match_single(system_info, 'dom0', 'dom0'))
        self.assertTrue(is_match_single(system_info,
            '$dispvm:default-dvm', '$dispvm:default-dvm'))
        self.assertTrue(is_match_single(system_info, '$anyvm', '$dispvm'))
        self.assertTrue(is_match_single(system_info, '$anyvm', 'test-vm1'))
        self.assertTrue(is_match_single(system_info, '$anyvm', 'test-vm1'))
        self.assertTrue(is_match_single(system_info, '$anyvm', 'test-vm1'))
        self.assertFalse(is_match_single(system_info, '$default', 'test-vm1'))
        self.assertFalse(is_match_single(system_info, '$tag:tag1', 'test-vm3'))
        self.assertFalse(is_match_single(system_info, '$anyvm', 'no-such-vm'))
        # test-vm1.template_for_dispvms=False
        self.assertFalse(is_match_single(system_info,
            '$anyvm', '$dispvm:test-vm1'))
        # test-vm1.template_for_dispvms=False
        self.assertFalse(is_match_single(system_info,
            '$dispvm:test-vm1', '$dispvm:test-vm1'))
        self.assertFalse(is_match_single(system_info,
            '$dispvm:$tag:tag1', '$dispvm:test-vm1'))
        # test-vm3 has not tag1
        self.assertFalse(is_match_single(system_info,
            '$dispvm:$tag:tag1', '$dispvm:test-vm3'))
        # default-dvm has no tag3
        self.assertFalse(is_match_single(system_info,
            '$dispvm:$tag:tag3', '$dispvm:default-dvm'))
        # $anyvm deliberately excludes dom0/$adminvm; matching dom0 requires
        # naming it explicitly.
        self.assertFalse(is_match_single(system_info, '$anyvm', 'dom0'))
        self.assertFalse(is_match_single(system_info, '$anyvm', '$adminvm'))
        self.assertFalse(is_match_single(system_info,
            '$tag:dom0-tag', '$adminvm'))
        self.assertFalse(is_match_single(system_info,
            '$type:AdminVM', '$adminvm'))
        self.assertFalse(is_match_single(system_info,
            '$tag:dom0-tag', 'dom0'))
        self.assertFalse(is_match_single(system_info,
            '$type:AdminVM', 'dom0'))
        self.assertFalse(is_match_single(system_info, '$tag:tag1', 'dom0'))
        self.assertFalse(is_match_single(system_info, '$anyvm', '$tag:tag1'))
        self.assertFalse(is_match_single(system_info, '$anyvm', '$type:AppVM'))
        self.assertFalse(is_match_single(system_info, '$anyvm', '$invalid'))
        self.assertFalse(is_match_single(system_info, '$invalid', '$invalid'))
        self.assertFalse(is_match_single(system_info, '$anyvm', 'no-such-vm'))
        self.assertFalse(is_match_single(system_info,
            'no-such-vm', 'no-such-vm'))
        self.assertFalse(is_match_single(system_info, '$dispvm', 'test-vm1'))
        self.assertFalse(is_match_single(system_info, '$dispvm', 'default-dvm'))
        self.assertFalse(is_match_single(system_info,
            '$dispvm:default-dvm', 'default-dvm'))
        # Names with trailing whitespace must not match.
        self.assertFalse(is_match_single(system_info, '$anyvm', 'test-vm1\n'))
        self.assertFalse(is_match_single(system_info, '$anyvm', 'test-vm1 '))

    def test_050_match(self):
        line = qubespolicy.PolicyRule('$anyvm $anyvm allow')
        self.assertTrue(line.is_match(system_info, 'test-vm1', 'test-vm2'))
        line = qubespolicy.PolicyRule('$anyvm $anyvm allow')
        self.assertFalse(line.is_match(system_info, 'no-such-vm', 'test-vm2'))
        line = qubespolicy.PolicyRule('$anyvm $anyvm allow')
        self.assertFalse(line.is_match(system_info, 'test-vm1', 'no-such-vm'))
        line = qubespolicy.PolicyRule('$anyvm $dispvm allow')
        self.assertTrue(line.is_match(system_info, 'test-vm1', '$dispvm'))
        # Bare $dispvm rule does not match an explicit $dispvm:<template>.
        line = qubespolicy.PolicyRule('$anyvm $dispvm allow')
        self.assertFalse(line.is_match(system_info,
            'test-vm1', '$dispvm:default-dvm'))
        # ...but a $dispvm:default-dvm rule matches bare $dispvm, because
        # default-dvm is test-vm1's default DispVM template.
        line = qubespolicy.PolicyRule('$anyvm $dispvm:default-dvm allow')
        self.assertTrue(line.is_match(system_info, 'test-vm1', '$dispvm'))
        line = qubespolicy.PolicyRule('$anyvm $dispvm:default-dvm allow')
        self.assertTrue(line.is_match(system_info,
            'test-vm1', '$dispvm:default-dvm'))
        line = qubespolicy.PolicyRule('$anyvm $dispvm:$tag:tag3 allow')
        self.assertTrue(line.is_match(system_info,
            'test-vm1', '$dispvm:test-vm3'))

    def test_060_expand_target(self):
        # Maps a rule to the full set of targets it can expand to under
        # the system_info fixture (order-insensitive comparison below).
        lines = {
            '$anyvm $anyvm allow': ['test-vm1', 'test-vm2', 'test-vm3',
                '$dispvm:test-vm3',
                'default-dvm', '$dispvm:default-dvm', 'test-invalid-dvm',
                'test-no-dvm', 'test-template', 'test-standalone', '$dispvm'],
            '$anyvm $dispvm allow': ['$dispvm'],
            '$anyvm $dispvm:default-dvm allow': ['$dispvm:default-dvm'],
            # no DispVM from test-vm1 allowed
            '$anyvm $dispvm:test-vm1 allow': [],
            '$anyvm $dispvm:test-vm3 allow': ['$dispvm:test-vm3'],
            '$anyvm $dispvm:$tag:tag1 allow': [],
            '$anyvm $dispvm:$tag:tag3 allow': ['$dispvm:test-vm3'],
            '$anyvm test-vm1 allow': ['test-vm1'],
            '$anyvm $type:AppVM allow': ['test-vm1', 'test-vm2', 'test-vm3',
                'default-dvm', 'test-invalid-dvm', 'test-no-dvm'],
            '$anyvm $type:TemplateVM allow': ['test-template'],
            '$anyvm $tag:tag1 allow': ['test-vm1', 'test-invalid-dvm',
                'test-template', 'test-standalone', 'test-no-dvm'],
            '$anyvm $tag:tag2 allow': ['test-vm1', 'test-vm2',
                'test-invalid-dvm', 'test-template', 'test-standalone',
                'test-no-dvm'],
            '$anyvm $tag:no-such-tag allow': [],
        }
        for line in lines:
            with self.subTest(line):
                policy_line = qubespolicy.PolicyRule(line)
                self.assertCountEqual(list(policy_line.expand_target(system_info)),
                    lines[line])

    def test_070_expand_override_target(self):
        line = qubespolicy.PolicyRule(
            '$anyvm $anyvm allow,target=test-vm2')
        self.assertEqual(
            line.expand_override_target(system_info, 'test-vm1'),
            'test-vm2')

    def test_071_expand_override_target_dispvm(self):
        # Bare $dispvm override resolves to the source VM's default DispVM
        # template.
        line = qubespolicy.PolicyRule(
            '$anyvm $anyvm allow,target=$dispvm')
        self.assertEqual(
            line.expand_override_target(system_info, 'test-vm1'),
            '$dispvm:default-dvm')

    def test_072_expand_override_target_dispvm_specific(self):
        line = qubespolicy.PolicyRule(
            '$anyvm $anyvm allow,target=$dispvm:test-vm3')
        self.assertEqual(
            line.expand_override_target(system_info, 'test-vm1'),
            '$dispvm:test-vm3')

    def test_073_expand_override_target_dispvm_none(self):
        # Source has no default DispVM, so $dispvm cannot be resolved.
        line = qubespolicy.PolicyRule(
            '$anyvm $anyvm allow,target=$dispvm')
        self.assertEqual(
            line.expand_override_target(system_info, 'test-no-dvm'),
            None)

    def test_074_expand_override_target_dom0(self):
        line = qubespolicy.PolicyRule(
            '$anyvm $anyvm allow,target=dom0')
        self.assertEqual(
            line.expand_override_target(system_info, 'test-no-dvm'),
            'dom0')

    def test_075_expand_override_target_dom0(self):
        # $adminvm as override target stays symbolic (not rewritten to dom0).
        line = qubespolicy.PolicyRule(
            '$anyvm $anyvm allow,target=$adminvm')
        self.assertEqual(
            line.expand_override_target(system_info, 'test-no-dvm'),
            '$adminvm')
class TC_10_PolicyAction(qubes.tests.QubesTestCase):
def test_000_init(self):
rule = qubespolicy.PolicyRule('$anyvm $anyvm deny')
with self.assertRaises(qubespolicy.AccessDenied):
qubespolicy.PolicyAction('test.service', 'test-vm1', 'test-vm2',
rule, 'test-vm2')
def test_001_init(self):
rule = qubespolicy.PolicyRule('$anyvm $anyvm ask')
action = qubespolicy.PolicyAction('test.service', 'test-vm1',
None, rule, 'test-vm2', ['test-vm2', 'test-vm3'])
self.assertEqual(action.service, 'test.service')
self.assertEqual(action.source, 'test-vm1')
self.assertIsNone(action.target)
self.assertEqual(action.original_target, 'test-vm2')
self.assertEqual(action.targets_for_ask, ['test-vm2', 'test-vm3'])
self.assertEqual(action.rule, rule)
self.assertEqual(action.action, qubespolicy.Action.ask)
def test_002_init_invalid(self):
rule_ask = qubespolicy.PolicyRule('$anyvm $anyvm ask')
rule_allow = qubespolicy.PolicyRule('$anyvm $anyvm allow')
with self.assertRaises(AssertionError):
qubespolicy.PolicyAction('test.service', 'test-vm1',
None, rule_allow, 'test-vm2', None)
with self.assertRaises(AssertionError):
qubespolicy.PolicyAction('test.service', 'test-vm1',
'test-vm2', rule_allow, 'test-vm2', ['test-vm2', 'test-vm3'])
with self.assertRaises(AssertionError):
qubespolicy.PolicyAction('test.service', 'test-vm1',
None, rule_ask, 'test-vm2', None)
def test_003_init_default_target(self):
rule_ask = qubespolicy.PolicyRule('$anyvm $anyvm ask')
action = qubespolicy.PolicyAction('test.service', 'test-vm1',
'test-vm1', rule_ask, 'test-vm2', ['test-vm2'])
self.assertIsNone(action.target)
action = qubespolicy.PolicyAction('test.service', 'test-vm1',
'test-vm2', rule_ask, 'test-vm2', ['test-vm2'])
self.assertEqual(action.target, 'test-vm2')
def test_010_handle_user_response(self):
rule = qubespolicy.PolicyRule('$anyvm $anyvm ask')
action = qubespolicy.PolicyAction('test.service', 'test-vm1',
None, rule, 'test-vm2', ['test-vm2', 'test-vm3'])
action.handle_user_response(True, 'test-vm2')
self.assertEqual(action.action, qubespolicy.Action.allow)
self.assertEqual(action.target, 'test-vm2')
def test_011_handle_user_response(self):
rule = qubespolicy.PolicyRule('$anyvm $anyvm ask')
action = qubespolicy.PolicyAction('test.service', 'test-vm1',
None, rule, 'test-vm2', ['test-vm2', 'test-vm3'])
with self.assertRaises(AssertionError):
action.handle_user_response(True, 'test-no-dvm')
    def test_012_handle_user_response(self):
        """Refusing the prompt raises AccessDenied and flips the stored
        action to 'deny'."""
        rule = qubespolicy.PolicyRule('$anyvm $anyvm ask')
        action = qubespolicy.PolicyAction('test.service', 'test-vm1',
            None, rule, 'test-vm2', ['test-vm2', 'test-vm3'])
        with self.assertRaises(qubespolicy.AccessDenied):
            action.handle_user_response(False, None)
        self.assertEqual(action.action, qubespolicy.Action.deny)
    def test_013_handle_user_response_with_default_target(self):
        """An 'ask' rule with default_target= still accepts a user
        response selecting that default target."""
        rule = qubespolicy.PolicyRule(
            '$anyvm $anyvm ask,default_target=test-vm2')
        action = qubespolicy.PolicyAction('test.service', 'test-vm1',
            None, rule, 'test-vm2', ['test-vm2', 'test-vm3'])
        action.handle_user_response(True, 'test-vm2')
        self.assertEqual(action.action, qubespolicy.Action.allow)
        self.assertEqual(action.target, 'test-vm2')
    @unittest.mock.patch('qubespolicy.qubesd_call')
    @unittest.mock.patch('subprocess.call')
    def test_020_execute(self, mock_subprocess, mock_qubesd_call):
        """Plain allow: target VM is started, then qrexec-client is
        invoked with the DEFAULT:QUBESRPC command line."""
        rule = qubespolicy.PolicyRule('$anyvm $anyvm allow')
        action = qubespolicy.PolicyAction('test.service', 'test-vm1',
            'test-vm2', rule, 'test-vm2')
        action.execute('some-ident')
        self.assertEqual(mock_qubesd_call.mock_calls,
            [unittest.mock.call('test-vm2', 'admin.vm.Start')])
        self.assertEqual(mock_subprocess.mock_calls,
            [unittest.mock.call([qubespolicy.QREXEC_CLIENT, '-d', 'test-vm2',
                '-c', 'some-ident', 'DEFAULT:QUBESRPC test.service test-vm1'])])
    @unittest.mock.patch('qubespolicy.qubesd_call')
    @unittest.mock.patch('subprocess.call')
    def test_021_execute_dom0(self, mock_subprocess, mock_qubesd_call):
        """dom0 target: no admin.vm.Start call is made and the service
        is run through the RPC multiplexer path instead of
        DEFAULT:QUBESRPC."""
        rule = qubespolicy.PolicyRule('$anyvm dom0 allow')
        action = qubespolicy.PolicyAction('test.service', 'test-vm1',
            'dom0', rule, 'dom0')
        action.execute('some-ident')
        self.assertEqual(mock_qubesd_call.mock_calls, [])
        self.assertEqual(mock_subprocess.mock_calls,
            [unittest.mock.call([qubespolicy.QREXEC_CLIENT, '-d', 'dom0',
                '-c', 'some-ident',
                qubespolicy.QUBES_RPC_MULTIPLEXER_PATH +
                ' test.service test-vm1 dom0'])])
    @unittest.mock.patch('qubespolicy.qubesd_call')
    @unittest.mock.patch('subprocess.call')
    def test_022_execute_dispvm(self, mock_subprocess, mock_qubesd_call):
        """$dispvm target: a disposable VM is created from the template,
        started, used for the call (with -W to wait), then killed."""
        rule = qubespolicy.PolicyRule('$anyvm $dispvm:default-dvm allow')
        action = qubespolicy.PolicyAction('test.service', 'test-vm1',
            '$dispvm:default-dvm', rule, '$dispvm:default-dvm')
        # Only CreateDisposable returns a meaningful value (the new VM name)
        mock_qubesd_call.side_effect = (lambda target, call:
            b'dispvm-name' if call == 'admin.vm.CreateDisposable' else
            unittest.mock.DEFAULT)
        action.execute('some-ident')
        self.assertEqual(mock_qubesd_call.mock_calls,
            [unittest.mock.call('default-dvm', 'admin.vm.CreateDisposable'),
             unittest.mock.call('dispvm-name', 'admin.vm.Start'),
             unittest.mock.call('dispvm-name', 'admin.vm.Kill')])
        self.assertEqual(mock_subprocess.mock_calls,
            [unittest.mock.call([qubespolicy.QREXEC_CLIENT, '-d', 'dispvm-name',
                '-c', 'some-ident', '-W',
                'DEFAULT:QUBESRPC test.service test-vm1'])])
    @unittest.mock.patch('qubespolicy.qubesd_call')
    @unittest.mock.patch('subprocess.call')
    def test_023_execute_already_running(self, mock_subprocess,
            mock_qubesd_call):
        """QubesVMNotHaltedError from admin.vm.Start (VM already running)
        is swallowed and the service call proceeds normally."""
        rule = qubespolicy.PolicyRule('$anyvm $anyvm allow')
        action = qubespolicy.PolicyAction('test.service', 'test-vm1',
            'test-vm2', rule, 'test-vm2')
        mock_qubesd_call.side_effect = \
            qubespolicy.QubesMgmtException('QubesVMNotHaltedError')
        action.execute('some-ident')
        self.assertEqual(mock_qubesd_call.mock_calls,
            [unittest.mock.call('test-vm2', 'admin.vm.Start')])
        self.assertEqual(mock_subprocess.mock_calls,
            [unittest.mock.call([qubespolicy.QREXEC_CLIENT, '-d', 'test-vm2',
                '-c', 'some-ident', 'DEFAULT:QUBESRPC test.service test-vm1'])])
    @unittest.mock.patch('qubespolicy.qubesd_call')
    @unittest.mock.patch('subprocess.call')
    def test_024_execute_startup_error(self, mock_subprocess,
            mock_qubesd_call):
        """Any other VM startup error propagates and qrexec-client is
        never invoked."""
        rule = qubespolicy.PolicyRule('$anyvm $anyvm allow')
        action = qubespolicy.PolicyAction('test.service', 'test-vm1',
            'test-vm2', rule, 'test-vm2')
        mock_qubesd_call.side_effect = \
            qubespolicy.QubesMgmtException('QubesVMError')
        with self.assertRaises(qubespolicy.QubesMgmtException):
            action.execute('some-ident')
        self.assertEqual(mock_qubesd_call.mock_calls,
            [unittest.mock.call('test-vm2', 'admin.vm.Start')])
        self.assertEqual(mock_subprocess.mock_calls, [])
class TC_20_Policy(qubes.tests.QubesTestCase):
    """Tests for qubespolicy.Policy: parsing policy files written to a
    scratch directory and evaluating them against the module-level
    system_info fixture."""
    def setUp(self):
        # Each test writes its own policy file(s) into tmp_policy_dir.
        super(TC_20_Policy, self).setUp()
        if not os.path.exists(tmp_policy_dir):
            os.mkdir(tmp_policy_dir)
    def tearDown(self):
        shutil.rmtree(tmp_policy_dir)
        super(TC_20_Policy, self).tearDown()
    def test_000_load(self):
        """Blank lines and comment lines are skipped during load."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
            f.write('\n')
            f.write('# comment\n')
            f.write('test-vm2 test-vm3 ask\n')
            f.write('   # comment  \n')
            f.write('$anyvm $anyvm ask\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        self.assertEqual(policy.service, 'test.service')
        self.assertEqual(len(policy.policy_rules), 3)
        self.assertEqual(policy.policy_rules[0].source, 'test-vm1')
        self.assertEqual(policy.policy_rules[0].target, 'test-vm2')
        self.assertEqual(policy.policy_rules[0].action,
            qubespolicy.Action.allow)
    def test_001_not_existent(self):
        """A missing policy file yields AccessDenied."""
        with self.assertRaises(qubespolicy.AccessDenied):
            qubespolicy.Policy('no-such.service', tmp_policy_dir)
    def test_002_include(self):
        """$include: splices rules from another file in place, keeping
        per-rule filename/lineno provenance of the original file."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
            f.write('$include:test.service2\n')
            f.write('$anyvm $anyvm deny\n')
        with open(os.path.join(tmp_policy_dir, 'test.service2'), 'w') as f:
            f.write('test-vm3 $default allow,target=test-vm2\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        self.assertEqual(policy.service, 'test.service')
        self.assertEqual(len(policy.policy_rules), 3)
        self.assertEqual(policy.policy_rules[0].source, 'test-vm1')
        self.assertEqual(policy.policy_rules[0].target, 'test-vm2')
        self.assertEqual(policy.policy_rules[0].action,
            qubespolicy.Action.allow)
        self.assertEqual(policy.policy_rules[0].filename,
            tmp_policy_dir + '/test.service')
        self.assertEqual(policy.policy_rules[0].lineno, 1)
        self.assertEqual(policy.policy_rules[1].source, 'test-vm3')
        self.assertEqual(policy.policy_rules[1].target, '$default')
        self.assertEqual(policy.policy_rules[1].action,
            qubespolicy.Action.allow)
        # rule 1 came from the included file - provenance points there
        self.assertEqual(policy.policy_rules[1].filename,
            tmp_policy_dir + '/test.service2')
        self.assertEqual(policy.policy_rules[1].lineno, 1)
        self.assertEqual(policy.policy_rules[2].source, '$anyvm')
        self.assertEqual(policy.policy_rules[2].target, '$anyvm')
        self.assertEqual(policy.policy_rules[2].action,
            qubespolicy.Action.deny)
        self.assertEqual(policy.policy_rules[2].filename,
            tmp_policy_dir + '/test.service')
        self.assertEqual(policy.policy_rules[2].lineno, 3)
    def test_010_find_rule(self):
        """find_matching_rule returns the first rule matching source and
        target, honoring $tag:, $type:, $dispvm: and $default forms."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
            f.write('test-vm1 $anyvm ask\n')
            f.write('test-vm2 $tag:tag1 deny\n')
            f.write('test-vm2 $tag:tag2 allow\n')
            f.write('test-vm2 $dispvm:$tag:tag3 allow\n')
            f.write('test-vm2 $dispvm:$tag:tag2 allow\n')
            f.write('test-vm2 $dispvm:default-dvm allow\n')
            f.write('$type:AppVM $default allow,target=test-vm3\n')
            f.write('$tag:tag1 $type:AppVM allow\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm1', 'test-vm2'), policy.policy_rules[0])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm1', 'test-vm3'), policy.policy_rules[1])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm2', 'test-vm2'), policy.policy_rules[3])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm2', 'test-no-dvm'), policy.policy_rules[2])
        # $anyvm matches $default too
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm1', ''), policy.policy_rules[1])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm2', ''), policy.policy_rules[7])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm2', '$default'), policy.policy_rules[7])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-no-dvm', 'test-vm3'), policy.policy_rules[8])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm2', '$dispvm:test-vm3'),
            policy.policy_rules[4])
        self.assertEqual(policy.find_matching_rule(
            system_info, 'test-vm2', '$dispvm'),
            policy.policy_rules[6])
        # no rule matching at all -> AccessDenied
        with self.assertRaises(qubespolicy.AccessDenied):
            policy.find_matching_rule(
                system_info, 'test-no-dvm', 'test-standalone')
        with self.assertRaises(qubespolicy.AccessDenied):
            policy.find_matching_rule(system_info, 'test-no-dvm', '$dispvm')
        with self.assertRaises(qubespolicy.AccessDenied):
            policy.find_matching_rule(
                system_info, 'test-standalone', '$default')
    def test_020_collect_targets_for_ask(self):
        """collect_targets_for_ask lists every target the given source
        could reach via non-deny rules."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
            f.write('test-vm1 $anyvm ask\n')
            f.write('test-vm2 $tag:tag1 deny\n')
            f.write('test-vm2 $tag:tag2 allow\n')
            f.write('test-no-dvm $type:AppVM deny\n')
            f.write('$type:AppVM $default allow,target=test-vm3\n')
            f.write('$tag:tag1 $type:AppVM allow\n')
            f.write('test-no-dvm $dispvm allow\n')
            f.write('test-standalone $dispvm allow\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        self.assertCountEqual(policy.collect_targets_for_ask(system_info,
            'test-vm1'), ['test-vm1', 'test-vm2', 'test-vm3',
            '$dispvm:test-vm3',
            'default-dvm', '$dispvm:default-dvm', 'test-invalid-dvm',
            'test-no-dvm', 'test-template', 'test-standalone'])
        self.assertCountEqual(policy.collect_targets_for_ask(system_info,
            'test-vm2'), ['test-vm2', 'test-vm3'])
        self.assertCountEqual(policy.collect_targets_for_ask(system_info,
            'test-vm3'), ['test-vm3'])
        self.assertCountEqual(policy.collect_targets_for_ask(system_info,
            'test-standalone'), ['test-vm1', 'test-vm2', 'test-vm3',
            'default-dvm', 'test-no-dvm', 'test-invalid-dvm',
            '$dispvm:default-dvm'])
        # a source denied everywhere gets an empty list
        self.assertCountEqual(policy.collect_targets_for_ask(system_info,
            'test-no-dvm'), [])
    def test_030_eval_simple(self):
        """evaluate() on a plain allow rule returns a fully-resolved
        PolicyAction; unmatched requests raise AccessDenied."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        action = policy.evaluate(system_info, 'test-vm1', 'test-vm2')
        self.assertEqual(action.rule, policy.policy_rules[0])
        self.assertEqual(action.action, qubespolicy.Action.allow)
        self.assertEqual(action.target, 'test-vm2')
        self.assertEqual(action.original_target, 'test-vm2')
        self.assertEqual(action.service, 'test.service')
        self.assertIsNone(action.targets_for_ask)
        with self.assertRaises(qubespolicy.AccessDenied):
            policy.evaluate(system_info, 'test-vm2', '$default')
    def test_031_eval_default(self):
        """A $default request resolves through 'allow,target=' and keeps
        the original '$default' in original_target."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
            f.write('test-vm1 $default allow,target=test-vm2\n')
            f.write('$tag:tag1 test-vm2 ask\n')
            f.write('$tag:tag2 $anyvm allow\n')
            f.write('test-vm3 $anyvm deny\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        action = policy.evaluate(system_info, 'test-vm1', '$default')
        self.assertEqual(action.rule, policy.policy_rules[1])
        self.assertEqual(action.action, qubespolicy.Action.allow)
        self.assertEqual(action.target, 'test-vm2')
        self.assertEqual(action.original_target, '$default')
        self.assertEqual(action.service, 'test.service')
        self.assertIsNone(action.targets_for_ask)
        with self.assertRaises(qubespolicy.AccessDenied):
            # action allow should hit, but no target specified (either by
            # caller or policy)
            policy.evaluate(system_info, 'test-standalone', '$default')
    def test_032_eval_ask(self):
        """An 'ask' rule without default_target leaves target unset and
        fills targets_for_ask."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
            f.write('test-vm1 $default allow,target=test-vm2\n')
            f.write('$tag:tag1 test-vm2 ask\n')
            f.write('$tag:tag1 test-vm3 ask,default_target=test-vm3\n')
            f.write('$tag:tag2 $anyvm allow\n')
            f.write('test-vm3 $anyvm deny\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        action = policy.evaluate(system_info, 'test-standalone', 'test-vm2')
        self.assertEqual(action.rule, policy.policy_rules[2])
        self.assertEqual(action.action, qubespolicy.Action.ask)
        self.assertIsNone(action.target)
        self.assertEqual(action.original_target, 'test-vm2')
        self.assertEqual(action.service, 'test.service')
        self.assertCountEqual(action.targets_for_ask,
            ['test-vm1', 'test-vm2', 'test-vm3', '$dispvm:test-vm3',
             'default-dvm', '$dispvm:default-dvm', 'test-invalid-dvm',
             'test-no-dvm', 'test-template', 'test-standalone'])
    def test_033_eval_ask(self):
        """An 'ask,default_target=' rule pre-selects that target while
        still offering the full targets_for_ask list."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm1 test-vm2 allow\n')
            f.write('test-vm1 $default allow,target=test-vm2\n')
            f.write('$tag:tag1 test-vm2 ask\n')
            f.write('$tag:tag1 test-vm3 ask,default_target=test-vm3\n')
            f.write('$tag:tag2 $anyvm allow\n')
            f.write('test-vm3 $anyvm deny\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        action = policy.evaluate(system_info, 'test-standalone', 'test-vm3')
        self.assertEqual(action.rule, policy.policy_rules[3])
        self.assertEqual(action.action, qubespolicy.Action.ask)
        self.assertEqual(action.target, 'test-vm3')
        self.assertEqual(action.original_target, 'test-vm3')
        self.assertEqual(action.service, 'test.service')
        self.assertCountEqual(action.targets_for_ask,
            ['test-vm1', 'test-vm2', 'test-vm3', '$dispvm:test-vm3',
             'default-dvm', '$dispvm:default-dvm', 'test-invalid-dvm',
             'test-no-dvm', 'test-template', 'test-standalone'])
    def test_034_eval_resolve_dispvm(self):
        """A bare '$dispvm' target resolves to the source VM's default
        DispVM template ($dispvm:default-dvm here)."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm3 $dispvm allow\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        action = policy.evaluate(system_info, 'test-vm3', '$dispvm')
        self.assertEqual(action.rule, policy.policy_rules[0])
        self.assertEqual(action.action, qubespolicy.Action.allow)
        self.assertEqual(action.target, '$dispvm:default-dvm')
        self.assertEqual(action.original_target, '$dispvm')
        self.assertEqual(action.service, 'test.service')
        self.assertIsNone(action.targets_for_ask)
    def test_035_eval_resolve_dispvm_fail(self):
        """'$dispvm' from a VM with no default DispVM is denied."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-no-dvm $dispvm allow\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        with self.assertRaises(qubespolicy.AccessDenied):
            policy.evaluate(system_info, 'test-no-dvm', '$dispvm')
    def test_036_eval_invalid_override_target(self):
        """'allow,target=' pointing at a nonexistent VM is denied."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm3 $anyvm allow,target=no-such-vm\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        with self.assertRaises(qubespolicy.AccessDenied):
            policy.evaluate(system_info, 'test-vm3', '$default')
    def test_037_eval_ask_no_targets(self):
        """An 'ask' rule with an empty candidate-target set is denied."""
        with open(os.path.join(tmp_policy_dir, 'test.service'), 'w') as f:
            f.write('test-vm3 $default ask\n')
        policy = qubespolicy.Policy('test.service', tmp_policy_dir)
        with self.assertRaises(qubespolicy.AccessDenied):
            policy.evaluate(system_info, 'test-vm3', '$default')
class TC_30_Misc(qubes.tests.QubesTestCase):
    """Tests for the qubesd_call() wire protocol: NUL-separated fields
    (src, method, dest, arg, payload) over a Unix socket, '0'-prefixed
    success replies and '2'-prefixed serialized exceptions."""
    @unittest.mock.patch('socket.socket')
    def test_000_qubesd_call(self, mock_socket):
        """Call without arg/payload: empty fields still get their NUL
        separators; reply payload after the b'0\\x00' prefix is returned."""
        mock_config = {
            'return_value.makefile.return_value.read.return_value': b'0\x00data'
        }
        mock_socket.configure_mock(**mock_config)
        result = qubespolicy.qubesd_call('test', 'internal.method')
        self.assertEqual(result, b'data')
        self.assertEqual(mock_socket.mock_calls, [
            unittest.mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
            unittest.mock.call().connect(qubespolicy.QUBESD_INTERNAL_SOCK),
            unittest.mock.call().sendall(b'dom0'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'internal.method'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'test'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().shutdown(socket.SHUT_WR),
            unittest.mock.call().makefile('rb'),
            unittest.mock.call().makefile().read(),
        ])
    @unittest.mock.patch('socket.socket')
    def test_001_qubesd_call_arg_payload(self, mock_socket):
        """Call with explicit arg and payload: both are sent in their
        protocol positions."""
        mock_config = {
            'return_value.makefile.return_value.read.return_value': b'0\x00data'
        }
        mock_socket.configure_mock(**mock_config)
        result = qubespolicy.qubesd_call('test', 'internal.method', 'arg',
            b'payload')
        self.assertEqual(result, b'data')
        self.assertEqual(mock_socket.mock_calls, [
            unittest.mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
            unittest.mock.call().connect(qubespolicy.QUBESD_INTERNAL_SOCK),
            unittest.mock.call().sendall(b'dom0'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'internal.method'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'test'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'arg'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'payload'),
            unittest.mock.call().shutdown(socket.SHUT_WR),
            unittest.mock.call().makefile('rb'),
            unittest.mock.call().makefile().read(),
        ])
    @unittest.mock.patch('socket.socket')
    def test_002_qubesd_call_exception(self, mock_socket):
        """A '2'-prefixed reply is decoded into QubesMgmtException with
        the remote exception type preserved in exc_type."""
        mock_config = {
            'return_value.makefile.return_value.read.return_value':
                b'2\x00SomeError\x00traceback\x00message\x00'
        }
        mock_socket.configure_mock(**mock_config)
        with self.assertRaises(qubespolicy.QubesMgmtException) as e:
            qubespolicy.qubesd_call('test', 'internal.method')
        self.assertEqual(e.exception.exc_type, 'SomeError')
        self.assertEqual(mock_socket.mock_calls, [
            unittest.mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
            unittest.mock.call().connect(qubespolicy.QUBESD_INTERNAL_SOCK),
            unittest.mock.call().sendall(b'dom0'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'internal.method'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'test'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().sendall(b'\x00'),
            unittest.mock.call().shutdown(socket.SHUT_WR),
            unittest.mock.call().makefile('rb'),
            unittest.mock.call().makefile().read(),
        ])
| gpl-2.0 |
komakallio/komahub | tools/factoryreset.py | 1 | 1319 | import sys
import TeensyRawhid
# USB vendor/product IDs used to locate the KomaHub raw HID device.
KOMAHUB_VID = 0x1209
KOMAHUB_PID = 0x4242
def usage():
    """Print command-line help to stdout (Python 2 print statements)."""
    print 'usage: factoryreset.py <serialnumber> <r6ohms> <r7ohms> <boardrevision>'
    print '  serialnumber  - serial number of the device'
    print '  r6ohms        - measured value of the r6 resistor (ohms)'
    print '  r7ohms        - measured value of the r7 resistor (ohms)'
    print '  boardrevision - board revision, e.g. value 16 (0x10) for 1.0'
def cmdlineargs(args):
    """Validate and parse the command-line argument vector.

    :param args: full argv list: program name plus four numeric arguments
        (serialnumber, r6ohms, r7ohms, boardrevision)
    :return: 4-tuple of ints in that order
    Prints usage and exits with status 1 on a wrong argument count.
    """
    if len(args) != 5:
        usage()
        sys.exit(1)
    # Parse from the passed-in list, not sys.argv: the original checked
    # len(args) but then read sys.argv, so the parameter was ignored.
    return (int(args[1]), int(args[2]), int(args[3]), int(args[4]))
def inttobytes(n):
    """Split a 16-bit value into its (low byte, high byte) pair."""
    low = n & 0xFF
    high = (n >> 8) & 0xFF
    return (low, high)
def factoryreset(serial, r6ohms, r7ohms, boardrevision):
    """Send a 64-byte factory-reset raw HID report to the KomaHub device.

    Report layout: 'K', 0xFA command byte, then serial, r6ohms and r7ohms
    as little-endian 16-bit values, the board revision byte, and zero
    padding up to the fixed 64-byte report size.
    """
    # Renamed from 'buffer' (shadowed a Python 2 builtin); print() and
    # map(chr, ...) keep this runnable under both Python 2 and 3 syntax.
    packet = [ord('K'), 0xFA]
    packet.extend(inttobytes(serial))
    packet.extend(inttobytes(r6ohms))
    packet.extend(inttobytes(r7ohms))
    packet.append(boardrevision)
    # Pad to the fixed raw HID report length.
    packet.extend([0] * (64 - len(packet)))
    print(packet)
    rh = TeensyRawhid.Rawhid()
    rh.open(vid=KOMAHUB_VID, pid=KOMAHUB_PID)
    # TeensyRawhid expects a byte string; 1000 ms timeout.
    rh.send("".join(map(chr, packet)), 1000)
    rh.close()
if __name__ == '__main__':
    # Parse the CLI arguments and perform the factory reset.
    factoryreset(*cmdlineargs(sys.argv))
| mit |
windskyer/nova | nova/api/openstack/compute/legacy_v2/contrib/flavormanage.py | 63 | 4241 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
    """The Flavor Lifecycle API controller for the OpenStack API."""
    _view_builder_class = flavors_view.ViewBuilder
    def __init__(self):
        super(FlavorManageController, self).__init__()
    @wsgi.action("delete")
    def _delete(self, req, id):
        """Delete the flavor identified by flavor id.

        Returns 202 on success, 404 when no such (non-deleted) flavor
        exists.
        """
        context = req.environ['nova.context']
        authorize(context)
        try:
            # read_deleted="no": an already-deleted flavor is a 404 too.
            flavor = flavors.get_flavor_by_flavor_id(
                    id, ctxt=context, read_deleted="no")
        except exception.FlavorNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        flavors.destroy(flavor['name'])
        return webob.Response(status_int=202)
    @wsgi.action("create")
    def _create(self, req, body):
        """Create a new flavor from the request body.

        'name', 'ram', 'vcpus' and 'disk' are required (400 when absent);
        'id' is optional, as are the extended attributes, which fall back
        to defaults. Raises 409 on a name/id conflict, 400 on invalid
        values, 500 when the create itself fails.
        """
        context = req.environ['nova.context']
        authorize(context)
        if not self.is_valid_body(body, 'flavor'):
            msg = _("Invalid request body")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        vals = body['flavor']
        name = vals.get('name')
        if name is None:
            msg = _("A valid name parameter is required")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # Optional; presumably auto-generated downstream when None.
        flavorid = vals.get('id')
        memory = vals.get('ram')
        if memory is None:
            msg = _("A valid ram parameter is required")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        vcpus = vals.get('vcpus')
        if vcpus is None:
            msg = _("A valid vcpus parameter is required")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        root_gb = vals.get('disk')
        if root_gb is None:
            msg = _("A valid disk parameter is required")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # Extended attributes with API-documented defaults.
        ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
        swap = vals.get('swap', 0)
        rxtx_factor = vals.get('rxtx_factor', 1.0)
        is_public = vals.get('os-flavor-access:is_public', True)
        try:
            flavor = flavors.create(name, memory, vcpus, root_gb,
                                    ephemeral_gb=ephemeral_gb,
                                    flavorid=flavorid, swap=swap,
                                    rxtx_factor=rxtx_factor,
                                    is_public=is_public)
            req.cache_db_flavor(flavor)
        except (exception.FlavorExists,
                exception.FlavorIdExists) as err:
            raise webob.exc.HTTPConflict(explanation=err.format_message())
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        except exception.FlavorCreateFailed as exc:
            raise webob.exc.HTTPInternalServerError(explanation=
                exc.format_message())
        return self._view_builder.show(req, flavor)
class Flavormanage(extensions.ExtensionDescriptor):
    """Flavor create/delete API support."""
    name = "FlavorManage"
    alias = "os-flavor-manage"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_manage/api/v1.1")
    updated = "2012-01-19T00:00:00Z"
    def get_controller_extensions(self):
        # Attach the manage controller's create/delete actions onto the
        # existing 'flavors' resource.
        controller = FlavorManageController()
        extension = extensions.ControllerExtension(self, 'flavors', controller)
        return [extension]
brennie/reviewboard | reviewboard/hostingsvcs/tests/test_hosting_service_auth_form.py | 3 | 16079 | from __future__ import unicode_literals
from reviewboard.hostingsvcs.errors import (AuthorizationError,
TwoFactorAuthCodeRequiredError)
from reviewboard.hostingsvcs.forms import HostingServiceAuthForm
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.service import (register_hosting_service,
unregister_hosting_service)
from reviewboard.scmtools.models import Tool
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
from reviewboard.testing.hosting_services import (SelfHostedTestService,
TestService)
class HostingServiceAuthFormTests(TestCase):
"""Unit tests for reviewboard.hostingsvcs.forms.HostingServiceAuthForm."""
fixtures = ['test_scmtools']
    def setUp(self):
        super(HostingServiceAuthFormTests, self).setUp()
        # Register the throwaway hosting services these tests exercise.
        register_hosting_service('test', TestService)
        register_hosting_service('self_hosted_test', SelfHostedTestService)
        self.git_tool_id = Tool.objects.get(name='Git').pk
    def tearDown(self):
        super(HostingServiceAuthFormTests, self).tearDown()
        # Mirror setUp so other tests see a clean service registry.
        unregister_hosting_service('self_hosted_test')
        unregister_hosting_service('test')
    def test_override_help_texts(self):
        """Testing HostingServiceAuthForm subclasses overriding help texts"""
        class MyAuthForm(HostingServiceAuthForm):
            # Meta.help_texts replaces the default field help text.
            class Meta:
                help_texts = {
                    'hosting_account_username': 'My help text.',
                }
        form = MyAuthForm(hosting_service_cls=TestService)
        self.assertEqual(form.fields['hosting_account_username'].help_text,
                         'My help text.')
    def test_override_labels(self):
        """Testing HostingServiceAuthForm subclasses overriding labels"""
        class MyAuthForm(HostingServiceAuthForm):
            # Meta.labels replaces the default field label.
            class Meta:
                labels = {
                    'hosting_account_username': 'My label.',
                }
        form = MyAuthForm(hosting_service_cls=TestService)
        self.assertEqual(form.fields['hosting_account_username'].label,
                         'My label.')
    def test_get_credentials_default(self):
        """Testing HostingServiceAuthForm.get_credentials default behavior"""
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        # Only username/password come back when no 2FA code was posted.
        self.assertEqual(
            form.get_credentials(),
            {
                'username': 'myuser',
                'password': 'mypass',
            })
    def test_get_credentials_default_with_2fa_code(self):
        """Testing HostingServiceAuthForm.get_credentials default behavior
        with two-factor auth code
        """
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
                'hosting_account_two_factor_auth_code': '123456',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        # The posted 2FA code is included alongside the credentials.
        self.assertEqual(
            form.get_credentials(),
            {
                'username': 'myuser',
                'password': 'mypass',
                'two_factor_auth_code': '123456',
            })
    def test_get_credentials_with_form_prefix(self):
        """Testing HostingServiceAuthForm.get_credentials default behavior
        with form prefix
        """
        # Django's form prefix= namespaces every posted field name; the
        # returned credential keys must still be the unprefixed ones.
        form = HostingServiceAuthForm(
            {
                'myservice-hosting_account_username': 'myuser',
                'myservice-hosting_account_password': 'mypass',
                'myservice-hosting_account_two_factor_auth_code': '123456',
            },
            hosting_service_cls=TestService,
            prefix='myservice')
        self.assertTrue(form.is_valid())
        self.assertEqual(
            form.get_credentials(),
            {
                'username': 'myuser',
                'password': 'mypass',
                'two_factor_auth_code': '123456',
            })
    def test_save_new_account(self):
        """Testing HostingServiceAuthForm.save with new account"""
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        hosting_account = form.save()
        # A fresh account row is created with the form's credentials.
        self.assertIsNotNone(hosting_account.pk)
        self.assertEqual(hosting_account.service_name, 'test')
        self.assertEqual(hosting_account.username, 'myuser')
        self.assertEqual(hosting_account.data['password'], 'mypass')
        self.assertIsNone(hosting_account.hosting_url)
        self.assertIsNone(hosting_account.local_site)
    def test_save_new_account_with_existing_stored(self):
        """Testing HostingServiceAuthForm.save with new account matching
        existing stored account information
        """
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        orig_account = HostingServiceAccount.objects.create(
            service_name='test',
            username='myuser')
        hosting_account = form.save()
        # save() reuses the matching stored account instead of creating
        # a duplicate, and refreshes its credentials.
        self.assertIsNotNone(hosting_account.pk)
        self.assertEqual(hosting_account.pk, orig_account.pk)
        self.assertEqual(hosting_account.service_name, 'test')
        self.assertEqual(hosting_account.username, 'myuser')
        self.assertEqual(hosting_account.data['password'], 'mypass')
        self.assertIsNone(hosting_account.hosting_url)
        self.assertIsNone(hosting_account.local_site)
    def test_save_new_account_with_hosting_url(self):
        """Testing HostingServiceAuthForm.save with new account and hosting URL
        """
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
                'hosting_url': 'example.com',
            },
            hosting_service_cls=SelfHostedTestService)
        self.assertTrue(form.is_valid())
        hosting_account = form.save()
        # Self-hosted services persist the hosting URL on the account.
        self.assertIsNotNone(hosting_account.pk)
        self.assertEqual(hosting_account.service_name, 'self_hosted_test')
        self.assertEqual(hosting_account.username, 'myuser')
        self.assertEqual(hosting_account.data['password'], 'mypass')
        self.assertEqual(hosting_account.hosting_url, 'example.com')
        self.assertIsNone(hosting_account.local_site)
    def test_save_new_account_with_hosting_url_not_self_hosted(self):
        """Testing HostingServiceAuthForm.save with new account and hosting URL
        with non-self-hosted service
        """
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
                'hosting_url': 'example.com',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        # Non-self-hosted services ignore any posted hosting_url.
        self.assertNotIn('hosting_url', form.cleaned_data)
        hosting_account = form.save()
        self.assertIsNone(hosting_account.hosting_url)
    def test_save_new_account_without_hosting_url_self_hosted(self):
        """Testing HostingServiceAuthForm.save with new account and no
        hosting URL with a self-hosted service
        """
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=SelfHostedTestService)
        # hosting_url is mandatory for self-hosted services, so the
        # form fails validation rather than saving.
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                'hosting_url': ['This field is required.'],
            })
    def test_save_new_account_with_local_site(self):
        """Testing HostingServiceAuthForm.save with new account and Local Site
        """
        local_site = LocalSite.objects.create(name='test-site')
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService,
            local_site=local_site)
        self.assertTrue(form.is_valid())
        hosting_account = form.save()
        # The account is bound to the Local Site the form was given.
        self.assertIsNotNone(hosting_account.pk)
        self.assertEqual(hosting_account.service_name, 'test')
        self.assertEqual(hosting_account.username, 'myuser')
        self.assertEqual(hosting_account.data['password'], 'mypass')
        self.assertEqual(hosting_account.local_site, local_site)
        self.assertIsNone(hosting_account.hosting_url)
    def test_save_new_account_without_username(self):
        """Testing HostingServiceAuthForm.save with new account and no
        username in credentials
        """
        class MyAuthForm(HostingServiceAuthForm):
            # Deliberately broken override: drops the required
            # 'username' key from the credentials dict.
            def get_credentials(self):
                return {}
        form = MyAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        expected_message = (
            'Hosting service implementation error: '
            'MyAuthForm.get_credentials() must return a "username" key.'
        )
        with self.assertRaisesMessage(AuthorizationError, expected_message):
            form.save()
    def test_save_existing_account(self):
        """Testing HostingServiceAuthForm.save with updating existing account
        """
        orig_account = HostingServiceAccount.objects.create(
            service_name='test',
            username='myuser')
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService,
            hosting_account=orig_account)
        self.assertTrue(form.is_valid())
        hosting_account = form.save()
        # The very same account instance is updated in place.
        self.assertIs(hosting_account, orig_account)
        self.assertEqual(hosting_account.pk, orig_account.pk)
        self.assertEqual(hosting_account.service_name, 'test')
        self.assertEqual(hosting_account.username, 'myuser')
        self.assertEqual(hosting_account.data['password'], 'mypass')
        self.assertIsNone(hosting_account.hosting_url)
        self.assertIsNone(hosting_account.local_site)
    def test_save_existing_account_new_username(self):
        """Testing HostingServiceAuthForm.save with updating existing account
        with new username
        """
        orig_account = HostingServiceAccount.objects.create(
            service_name='test',
            username='myuser')
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'mynewuser',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService,
            hosting_account=orig_account)
        self.assertTrue(form.is_valid())
        hosting_account = form.save()
        # The existing row is kept (same pk) but its username changes.
        self.assertIs(hosting_account, orig_account)
        self.assertEqual(hosting_account.pk, orig_account.pk)
        self.assertEqual(hosting_account.service_name, 'test')
        self.assertEqual(hosting_account.username, 'mynewuser')
        self.assertEqual(hosting_account.data['password'], 'mypass')
        self.assertIsNone(hosting_account.hosting_url)
        self.assertIsNone(hosting_account.local_site)
    def test_save_existing_account_new_hosting_url(self):
        """Testing HostingServiceAuthForm.save with updating existing account
        with new hosting URL
        """
        orig_account = HostingServiceAccount.objects.create(
            service_name='self_hosted_test',
            username='myuser',
            hosting_url='example1.com')
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': 'myuser',
                'hosting_account_password': 'mypass',
                'hosting_url': 'example2.com',
            },
            hosting_service_cls=SelfHostedTestService,
            hosting_account=orig_account)
        self.assertTrue(form.is_valid())
        hosting_account = form.save()
        # Same row, but the hosting_url is updated to the new value.
        self.assertIs(hosting_account, orig_account)
        self.assertEqual(hosting_account.pk, orig_account.pk)
        self.assertEqual(hosting_account.service_name, 'self_hosted_test')
        self.assertEqual(hosting_account.username, 'myuser')
        self.assertEqual(hosting_account.data['password'], 'mypass')
        self.assertEqual(hosting_account.hosting_url, 'example2.com')
        self.assertIsNone(hosting_account.local_site)
    def test_save_existing_account_new_service_fails(self):
        """Testing HostingServiceAuthForm.save with updating existing account
        with new hosting service fails
        """
        orig_account = HostingServiceAccount.objects.create(
            service_name='self_hosted_test',
            username='myuser',
            hosting_url='example1.com')
        expected_message = (
            'This account is not compatible with this hosting service '
            'configuration.'
        )
        # The service mismatch is rejected at form construction time,
        # before save() is even reachable.
        with self.assertRaisesMessage(ValueError, expected_message):
            HostingServiceAuthForm(hosting_service_cls=TestService,
                                   hosting_account=orig_account)
    def test_save_existing_account_new_local_site_fails(self):
        """Testing HostingServiceAuthForm.save with updating existing account
        with new Local Site fails
        """
        # NOTE(review): service_name is 'text' here while sibling tests use
        # 'test' -- looks like a fixture typo; confirm it is intentional.
        orig_account = HostingServiceAccount.objects.create(
            service_name='text',
            username='myuser')
        expected_message = (
            'This account is not compatible with this hosting service '
            'configuration.'
        )
        # A Local Site mismatch is also rejected at construction time.
        with self.assertRaisesMessage(ValueError, expected_message):
            HostingServiceAuthForm(
                hosting_service_cls=TestService,
                hosting_account=orig_account,
                local_site=LocalSite.objects.create(name='test-site'))
    def test_save_with_2fa_code_required(self):
        """Testing HostingServiceAuthForm.save with two-factor auth code
        required
        """
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': '2fa-user',
                'hosting_account_password': 'mypass',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        # The 2FA code field only becomes required after the service
        # reports (via the raised error) that a code is needed.
        self.assertFalse(
            form.fields['hosting_account_two_factor_auth_code'].required)
        with self.assertRaises(TwoFactorAuthCodeRequiredError):
            form.save()
        self.assertTrue(
            form.fields['hosting_account_two_factor_auth_code'].required)
    def test_save_with_2fa_code_provided(self):
        """Testing HostingServiceAuthForm.save with two-factor auth code
        provided
        """
        form = HostingServiceAuthForm(
            {
                'hosting_account_username': '2fa-user',
                'hosting_account_password': 'mypass',
                'hosting_account_two_factor_auth_code': '123456',
            },
            hosting_service_cls=TestService)
        self.assertTrue(form.is_valid())
        # Supplying the code up front lets save() complete normally.
        hosting_account = form.save()
        self.assertEqual(hosting_account.service_name, 'test')
        self.assertEqual(hosting_account.username, '2fa-user')
| mit |
40223150/w16b_test | static/Brython3.1.1-20150328-091302/Lib/signal.py | 743 | 1646 | """This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
# Signal numbers mirroring CPython's values on win32, provided so code
# that imports the signal module keeps working in the browser sandbox.
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
NSIG=23
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
# Standard handler sentinels: system default and ignore.
SIG_DFL=0
SIG_IGN=1
def signal(signalnum, handler) :
    # Stub: the browser environment cannot deliver OS signals, so handler
    # registration is accepted and silently ignored.
    pass
| agpl-3.0 |
fabianvaccaro/pygums | pythonLibs/pymorph-0.96/pymorph/compat.py | 3 | 20493 | import warnings
warnings.warn('pymorph.compat should be replaced with pymorph', DeprecationWarning)
from notimplemented import *
from mmorph import *
from text import *
# old abbreviations:
# Short legacy names kept so code written against the historical pymorph
# API continues to import; each is a plain alias for the modern function.
clohole=close_holes
ero=erode
cero=cerode
dil=dilate
cdil=cdilate
sedil=sedilate
add4dil=add4dilate
uint8=to_uint8
uint16=to_uint16
int32=to_int32
glblshow=randomcolor
randomcolour=randomcolor
# mmnames:
def _not_implemented(msg):
    """Return a stub that raises NotImplementedError(msg) when called.

    Used below to keep removed mm* names importable while pointing users
    at the replacement described in ``msg``.
    """
    def f(*args, **kwargs):
        raise NotImplementedError, msg
    f.__doc__ = '''\
This function is not implemented anymore.
%s''' % msg
    return f
# Legacy mm-prefixed names from pymorph <= 0.8: plain aliases for the
# current functions, except the display helpers that now point at stubs
# created by _not_implemented() (use matplotlib/PIL instead).
mmadd4dil=add4dil
mmaddm=addm
mmareaclose=areaclose
mmareaopen=areaopen
mmasf=asf
mmasfrec=asfrec
mmbinary=binary
mmblob=blob
mmbshow=bshow
mmcbisector=cbisector
mmcdil=cdil
mmcenter=center
mmcero=cero
mmclohole=clohole
mmclose=close
mmcloserec=closerec
mmcloserecth=closerecth
mmcloseth=closeth
mmconcat=concat
mmcthick=cthick
mmcthin=cthin
mmcwatershed=cwatershed
mmdatatype=datatype
mmdil=dil
mmdist=dist
mmdrawv=drawv
mmdtshow=_not_implemented('dtshow: use matplotlib')
mmedgeoff=edgeoff
mmero=ero
mmflood=flood
mmframe=frame
mmgdist=gdist
mmgdtshow=_not_implemented('gdtshow: use matplotlib')
mmgradm=gradm
mmgrain=grain
mmgray=gray
mmhistogram=histogram
mmhmax=hmax
mmhmin=hmin
mmhomothick=homothick
mmhomothin=homothin
mmimg2se=img2se
mminfcanon=infcanon
mminfgen=infgen
mminfrec=infrec
mminpos=inpos
mminterot=interot
mmintersec=intersec
mmintershow=intershow
mmisbinary=isbinary
mmisequal=isequal
mmlabel=label
mmlabelflat=labelflat
mmlastero=lastero
mmlblshow=_not_implemented('lblshow: use matplotlib')
mmlimits=limits
mmmat2set=mat2set
mmmaxleveltype=maxleveltype
mmneg=neg
mmopen=open
mmopenrec=openrec
mmopenrecth=openrecth
mmopenth=openth
mmopentransf=opentransf
mmpad4n=pad4n
mmpatspec=patspec
mmreadgray=_not_implemented('readgray: use PIL or readmagick')
mmregmax=regmax
mmregmin=regmin
mmse2hmt=se2hmt
mmse2interval=se2interval
mmsebox=sebox
mmsecross=secross
mmsedil=sedil
mmsedisk=sedisk
mmseline=seline
mmsereflect=sereflect
mmserot=serot
mmseshow=seshow
mmsesum=sesum
mmset2mat=set2mat
mmsetrans=setrans
mmseunion=seunion
mmskelm=skelm
mmskelmrec=skelmrec
mmskiz=skiz
mmsubm=subm
mmsupcanon=supcanon
mmsupgen=supgen
mmsuprec=suprec
mmswatershed=swatershed
mmsymdif=symdiff
mmtext=text
mmthick=thick
mmthin=thin
mmthreshad=threshad
mmtoggle=toggle
mmunion=union
mmvmax=vmax
mmwatershed=watershed
gshow=overlay
gdtshow=isolines
# Functions which were removed:
def mminstall(*args):
    # Compatibility no-op: license installation is no longer required.
    pass
def mmversion(*args):
    # Compatibility no-op: the old version query was removed.
    pass
def mmregister(*args):
    # Compatibility no-op: registration was removed with the license system.
    pass
def mmcmp(f1, oper, f2, oper1=None, f3=None):
    """
    Compare two (or three) images pixelwise, returning a binary image.

    - Alternative:
        Consider using array operations directly, i.e., instead of
        mmcmp(f1, '>', f2) simply use f1 > f2.
    - Input
        f1:    Gray-scale (uint8 or uint16) or binary image.
        oper:  Relationship from: '==', '~=', '<', '<=', '>', '>='.
        f2:    Gray-scale (uint8 or uint16) or binary image.
        oper1: Optional second relationship (same choices), applied
               between f2 and f3 and intersected with the first result.
        f3:    Optional third image for the chained comparison.
    - Output
        y: Binary image, true where the relationship(s) hold.
    - Description
        Apply the relation oper to each pixel of f1 and f2. A constant
        may be used in place of any image; it is treated as an image of
        the same size whose pixels all have that value.
    """
    # Single table of the supported elementwise relations; the original
    # spelled out two identical if/elif chains.
    relations = {
        '==': lambda a, b: a == b,
        '~=': lambda a, b: a != b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
        '>': lambda a, b: a > b,
        '<': lambda a, b: a < b,
    }
    assert oper in relations, \
        'oper must be one of: ==, ~=, >, >=, <, <=, it was:' + oper
    y = relations[oper](f1, f2)
    if oper1 is not None:  # fixed: was `oper1 != None`
        assert oper1 in relations, \
            'oper1 must be one of: ==, ~=, >, >=, <, <=, it was:' + oper1
        y = intersec(y, relations[oper1](f2, f3))
    return binary(y)
def mmvdome(f, v=1, Bc=None):
    """
    Obsolete alias: the correct name for this operator is vmax.

    - Input
        f:  Gray-scale (uint8 or uint16) image.
        v:  Volume parameter (default 1).
        Bc: Structuring element giving the connectivity; defaults to the
            3x3 elementary cross.
    - Output
        Gray-scale (uint8 or uint16) or binary image.
    """
    if Bc is None:
        Bc = secross()
    return hmax(f, v, Bc)
def mmis(f1, oper, f2=None, oper1=None, f3=None):
    """
    Verify if a relationship among images is true or false.

    - Alternative
        Consider using array operations or isbinary().
    - Input
        f1:    Gray-scale (uint8 or uint16) or binary image.
        oper:  Relationship from: '==', '~=', '<', '<=', '>', '>=',
               'binary', 'gray'.
        f2:    Optional second image (omit for 'binary'/'gray' checks).
        oper1: Optional second relationship between f2 and f3.
        f3:    Optional third image.
    - Output
        y: Boolean: true only if the relationship holds for every pixel.
    - Description
        The result is true if the relationship is true for all pixels in
        the image, and false otherwise. (This function replaces
        is_equal, is_lesseq, is_binary.)
    """
    # fixed: `f2 == None` compares elementwise on arrays; use `is None`.
    if f2 is None:
        # fixed: `from string import upper` is deprecated (removed in
        # Python 3); the str.upper() method is equivalent.
        oper = oper.upper()
        if oper == 'BINARY':
            return isbinary(f1)
        elif oper == 'GRAY':
            return not isbinary(f1)
        else:
            assert 0, 'oper should be BINARY or GRAY, was' + oper
    # Whole-image relations, each reduced to a single boolean.
    relations = {
        '==': lambda a, b: isequal(a, b),
        '~=': lambda a, b: not isequal(a, b),
        '<=': lambda a, b: mmislesseq(a, b),
        '>=': lambda a, b: mmislesseq(b, a),
        '>': lambda a, b: isequal(neg(threshad(b, a)), binary(1)),
        '<': lambda a, b: isequal(neg(threshad(a, b)), binary(1)),
    }
    assert oper in relations, \
        'oper must be one of: ==, ~=, >, >=, <, <=, it was:' + oper
    y = relations[oper](f1, f2)
    if oper1 is not None:
        assert oper1 in relations, \
            'oper1 must be one of: ==, ~=, >, >=, <, <=, it was:' + oper1
        y = y and relations[oper1](f2, f3)
    return y
def mmislesseq(f1, f2, MSG=None):
    """
    Verify if one image is less than or equal to another (is beneath).

    - Alternative
        Consider using (f1 <= f2).all()
    - Input
        f1:  Gray-scale (uint8 or uint16) or binary image.
        f2:  Gray-scale (uint8 or uint16) or binary image.
        MSG: Unused; kept for backward compatibility with old callers.
    - Output
        Boolean-like scalar: true iff f1(x) <= f2(x) for every pixel x.
    """
    from numpy import ravel
    # fixed: the original bound the result to a local named `bool`,
    # shadowing the builtin.
    result = min(ravel(f1 <= f2))
    return result
def mmstats(f, measurement):
    """
    Find global image statistics.

    - Input
        f:           Image (numpy array).
        measurement: One of 'max', 'min', 'median', 'mean', 'sum',
                     'std', 'std1' (case-insensitive).
    - Output
        y: The requested scalar statistic.
    - Description
        'max'/'min'  - extreme gray-scale values in the image;
        'sum'        - sum of all pixel values;
        'median'     - median of all pixels;
        'mean'       - mean of all pixels;
        'std'        - standard deviation normalized by N-1;
        'std1'       - idem, normalized by N.
    """
    import numpy
    # fixed: `from string import upper` (removed in Python 3) and the
    # dead numpy.oldnumeric.mlab import; str.upper() is equivalent.
    measurement = measurement.upper()
    if measurement == 'MAX':
        return f.max()
    elif measurement == 'MIN':
        return f.min()
    elif measurement == 'SUM':
        # fixed: 'sum' was documented but not implemented.
        return f.sum()
    elif measurement == 'MEAN':
        return f.mean()
    elif measurement == 'MEDIAN':
        # fixed: ndarrays have no .median() method; this branch used to
        # raise AttributeError.
        return numpy.median(f)
    elif measurement == 'STD':
        # fixed: documented as N-1 (sample) normalization; ndarray.std()
        # defaults to N, so request ddof=1 explicitly.
        return f.std(ddof=1)
    elif measurement == 'STD1':
        # fixed: 'std1' (N normalization) was documented but missing.
        return f.std()
    else:
        assert 0, 'pymorph.compat.mmstats: Not a valid measurement'
def mmsurf(f,options = None):
    # Placeholder: the old surface-rendering display was removed; the
    # input image is returned unchanged and `options` is ignored.
    return f
# Registry of Gnuplot figures used by plot(): slot 0 holds the index of
# the current figure, slots 1+ hold Gnuplot.Gnuplot instances.
_figs = [None]
def plot(plotitems=[], options=[], outfig=-1, filename=None):
    """
    - Purpose
        Plot a function.
    - Synopsis
        fig = plot(plotitems=[], options=[], outfig=-1, filename=None)
    - Input
        plotitems: Default: []. List of plotitems.
        options:   Default: []. List of options.
        outfig:    Default: -1. Integer. Figure number. 0 creates a new
                   figure.
        filename:  Default: None. String. Name of the PNG output file.
    - Output
        fig: Figure number.

    - Examples
        #
        import numpy
        #
        x = numpy.arange(0, 2*numpy.pi, 0.1)
        plot([[x]])
        y1 = numpy.sin(x)
        y2 = numpy.cos(x)
        opts = [['title', 'Example Plot'],\
                ['grid'],\
                ['style', 'linespoints'],\
                ['xlabel', '"X values"'],\
                ['ylabel', '"Y Values"']]
        y1_plt = [x, y1, None, 'sin(X)']
        y2_plt = [x, y2, 'lines', 'cos(X)']
        #
        # plotting two graphs using one step
        fig1 = plot([y1_plt, y2_plt], opts, 0)
        #
        # plotting the same graphs using two steps
        fig2 = plot([y1_plt], opts, 0)
        fig2 = plot([y2_plt], opts, fig2)
        #
        # first function has been lost, lets recover it
        opts.append(['replot'])
        fig2 = plot([y1_plt], opts, fig2)
    """
    # NOTE(review): uses the shared module-level _figs table; not
    # thread-safe -- confirm callers only plot from one thread.
    import Gnuplot
    import numpy
    newfig = 0
    if (plotitems == 'reset'):
        # Drop every cached Gnuplot instance and the current-figure index.
        _figs[0] = None
        _figs[1:] = []
        return 0
    if len(plotitems) == 0:
        # no plotitems specified: replot current figure
        if _figs[0]:
            outfig = _figs[0]
            g = _figs[outfig]
            g.replot()
            return outfig
        else:
            #assert 0, "plot error: There is no current figure\n"
            print "plot error: There is no current figure\n"
            return 0
    # figure to be plotted
    if ((outfig < 0) and _figs[0]):
        # current figure
        outfig = _figs[0]
    elif ( (outfig == 0) or ( (outfig == -1) and not _figs[0] ) ):
        # new figure
        newfig = 1
        outfig = len(_figs)
    elif outfig >= len(_figs):
        #assert 0, 'plot error: Figure ' + str(outfig) + 'does not exist\n'
        print 'plot error: Figure ' + str(outfig) + 'does not exist\n'
        return 0
    #current figure
    _figs[0] = outfig
    # Gnuplot pointer
    if newfig:
        if len(_figs) > 20:
            print '''plot error: could not create figure. Too many PlotItems in memory (20). Use
plot('reset') to clear table'''
            return 0
        g = Gnuplot.Gnuplot()
        _figs.append(g)
    else:
        g = _figs[outfig]
    # options
    try:
        options.remove(['replot'])
    except:
        g.reset()
    try:
        #default style
        g('set data style lines')
        for option in options:
            if option[0] == 'grid':
                g('set grid')
            elif option[0] == 'title':
                g('set title "' + option[1] + '"')
            elif option[0] == 'xlabel':
                g('set xlabel ' + option[1])
            elif option[0] == 'ylabel':
                g('set ylabel ' + option[1])
            elif option[0] == 'style':
                g('set data style ' + option[1])
            else:
                print "plot warning: Unknown option: " + option[0]
    except:
        print "plot warning: Bad usage in options! Using default values. Please, use help.\n"
    # Plot items: item[0]=x, item[1]=y, item[2]=style
    for item in plotitems:
        try:
            title = None
            style = None
            x = numpy.ravel(item[0])
            if len(item) > 1:
                # y axis specified
                y = numpy.ravel(item[1])
                if len(item) > 2:
                    # style specified
                    style = item[2]
                    if len(item) > 3:
                        title = item[3]
            else:
                # no y axis specified
                y = x
                x = numpy.arange(len(y))
            g.replot(Gnuplot.Data(x, y, title=title, with_=style))
        except:
            # On a malformed item, undo figure creation so the table
            # stays consistent with what is actually displayed.
            g.reset()
            if newfig:
                _figs.pop()
            #assert 0, "plot error: Bad usage in plotitems! Impossible to plot graph. Please, use help.\n"
            print "plot error: Bad usage in plotitems! Impossible to plot graph. Please, use help.\n"
            return 0
    # PNG file
    if filename:
        g.hardcopy(filename, terminal='png', color=1)
    fig = outfig
    return fig
# Legacy name for plot().
mmplot=plot
def mmwatershed(f,Bc=None,linereg='LINES'):
    # Old keyword API: the string flag 'LINES' maps to the new boolean arg.
    return watershed(f,Bc,(linereg == 'LINES'))
def mmcwatershed(f,Bc=None,linereg='LINES'):
    # Old keyword API: the string flag 'LINES' maps to the new boolean arg.
    return cwatershed(f,Bc,(linereg == 'LINES'))
def mmskiz(f,Bc=None,LINEREG='LINES',METRIC=None):
    # Old keyword API: translates the 'LINES' string flag to a boolean.
    return skiz(f,Bc,(LINEREG=='LINES'),METRIC)
def mmdist(f,Bc=None,METRIC=None):
    # Old positional API: METRIC is now the keyword argument `metric`.
    return dist(f,Bc,metric=METRIC)
def mmendpoints(OPTION='LOOP'):
    # Old upper-case parameter name; forwards to the new keyword `option`.
    return endpoints(option=OPTION)
def mmgshow(X, X1=None, X2=None, X3=None, X4=None, X5=None, X6=None):
    """
    Apply binary overlays as color layers on a binary or gray-scale image.

    - Input
        X:  Gray-scale (uint8 or uint16) or binary base image.
        X1: Binary image. Default: None. Red overlay.
        X2: Binary image. Default: None. Green overlay.
        X3: Binary image. Default: None. Blue overlay.
        X4: Binary image. Default: None. Magenta overlay.
        X5: Binary image. Default: None. Yellow overlay.
        X6: Binary image. Default: None. Cyan overlay.
    - Output
        Y: Color image as a depth-concatenation of the R, G, B planes.
    """
    if isbinary(X):
        X = gray(X, 'uint8')
    r = X
    g = X
    b = X
    # (overlay, name, (red, green, blue)): which RGB channels each overlay
    # colour switches on; the original repeated this logic six times.
    layers = (
        (X1, 'X1', (1, 0, 0)),  # red
        (X2, 'X2', (0, 1, 0)),  # green
        (X3, 'X3', (0, 0, 1)),  # blue
        (X4, 'X4', (1, 0, 1)),  # magenta
        (X5, 'X5', (1, 1, 0)),  # yellow
        (X6, 'X6', (0, 1, 1)),  # cyan
    )
    for overlay, name, bits in layers:
        if overlay is None:
            continue
        assert isbinary(overlay), name + ' must be binary overlay'
        x = gray(overlay, 'uint8')
        channels = []
        for channel, on in zip((r, g, b), bits):
            if on:
                # overlay pixels are forced to full intensity here
                channels.append(union(channel, x))
            else:
                # ...and masked out of the other channels
                channels.append(intersec(channel, neg(x)))
        r, g, b = channels
    return concat('d', r, g, b)
def mmglblshow(X, border=0.0):
    """
    Apply a random color table to a gray-scale labeled image.

    - Input
        X:      Gray-scale (uint8 or uint16) labeled image.
        border: Unused; kept for backward compatibility with old callers.
    - Output
        Y: Color image (depth-concatenation of the R, G, B planes) where
           each label value is mapped to a random color; label 0 (if
           present as the minimum) is forced to black.
    """
    from numpy import take, resize  # fixed: `shape` was imported unused
    from numpy.random import rand
    lo = X.min()
    hi = X.max()
    ncolors = hi - lo + 1
    # One random color per label value.
    R = to_int32(rand(ncolors) * 255)
    G = to_int32(rand(ncolors) * 255)
    B = to_int32(rand(ncolors) * 255)
    if lo == 0:
        # Label 0 is conventionally background: force it to black.
        R[0], G[0], B[0] = 0, 0, 0
    r = resize(take(R, X.ravel() - lo), X.shape)
    g = resize(take(G, X.ravel() - lo), X.shape)
    b = resize(take(B, X.ravel() - lo), X.shape)
    return concat('d', r, g, b)
def readgray(filename):
    """
    Read an image from a commercial file format and store it as a
    gray-scale image.

    - Input
        filename: String. Name of file to read.
    - Output
        y: Gray-scale (uint8 or uint16) or binary image.
    - Description
        readgray reads the image in filename and stores it in y, an
        uint8 gray-scale image (without colormap). If the input file is
        a color RGB image, it is converted to gray-scale using the
        equation: y = 0.2989 R + 0.587 G + 0.114 B. This function uses
        the PIL module (via pylab.imread).
    """
    import pylab
    import numpy
    y = pylab.imread(filename)
    if len(y.shape) == 3 and y.shape[0] == 3:
        # NOTE(review): assumes channel-first (3, H, W) layout -- confirm,
        # since pylab.imread usually returns (H, W, 3).
        # fixed: the original combined two comparison arrays with `and`,
        # which raises ValueError for arrays; test each plane separately.
        if numpy.alltrue(y[0, :, :] == y[1, :, :]) and \
           numpy.alltrue(y[0, :, :] == y[2, :, :]):
            y = y[0, :, :]
        else:
            print('Warning: converting true-color RGB image to gray')
            # fixed: the original called undefined `ubyte`; the compat
            # alias for the 8-bit conversion is to_uint8.
            y = to_uint8(0.2989 * y[0, :, :] +
                         0.5870 * y[1, :, :] +
                         0.1140 * y[2, :, :])
    elif len(y.shape) == 2:
        pass
    else:
        raise ValueError('Error, it is not 2D image')
    return y
def freedom(L=5):
    """Compatibility stub retained from the old API; performs no work.

    Always returns -1 regardless of L.
    """
    return -1
mmfreedom=freedom
| gpl-2.0 |
Lujeni/ansible | lib/ansible/module_utils/network/iosxr/providers/cli/config/bgp/address_family.py | 38 | 3850 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.iosxr.providers.providers import CliProvider
class AddressFamily(CliProvider):
    """Renders and diffs BGP address-family CLI sections for IOS-XR.

    Each method returns a list of CLI command strings; when an existing
    device `config` is supplied, commands already present are skipped and
    stale entries are negated (for the 'replace' operation).
    """
    def render(self, config=None):
        # Build the full command list for every configured address family.
        commands = list()
        safe_list = list()
        router_context = 'router bgp %s' % self.get_value('config.bgp_as')
        context_config = None
        for item in self.get_value('config.address_family'):
            context = 'address-family %s %s' % (item['afi'], item['safi'])
            context_commands = list()
            if config:
                context_path = [router_context, context]
                context_config = self.get_config_context(config, context_path, indent=1)
            for key, value in iteritems(item):
                if value is not None:
                    # Dispatch to an optional _render_<key> handler.
                    meth = getattr(self, '_render_%s' % key, None)
                    if meth:
                        resp = meth(item, context_config)
                        if resp:
                            context_commands.extend(to_list(resp))
            if context_commands:
                commands.append(context)
                commands.extend(context_commands)
                commands.append('exit')
            # Track every desired context so _negate_config keeps it.
            safe_list.append(context)
        if config:
            resp = self._negate_config(config, safe_list)
            commands.extend(resp)
        return commands
    def _negate_config(self, config, safe_list=None):
        # Emit 'no address-family ...' for sections present on the device
        # but absent from the desired configuration.
        commands = list()
        matches = re.findall(r'(address-family .+)$', config, re.M)
        for item in set(matches).difference(safe_list):
            commands.append('no %s' % item)
        return commands
    def _render_networks(self, item, config=None):
        # Render 'network <prefix>[/<masklen>] [route-policy <name>]' lines.
        commands = list()
        safe_list = list()
        for entry in item['networks']:
            network = entry['prefix']
            if entry['masklen']:
                network = '%s/%s' % (entry['prefix'], entry['masklen'])
            safe_list.append(network)
            cmd = 'network %s' % network
            if entry['route_map']:
                cmd += ' route-policy %s' % entry['route_map']
            if not config or cmd not in config:
                commands.append(cmd)
        if config and self.params['operation'] == 'replace':
            # Negate networks configured on the device but not requested.
            matches = re.findall(r'network (\S+)', config, re.M)
            for entry in set(matches).difference(safe_list):
                commands.append('no network %s' % entry)
        return commands
    def _render_redistribute(self, item, config=None):
        # Render 'redistribute <proto> [<id>] [metric ...] [route-policy ...]'.
        commands = list()
        safe_list = list()
        for entry in item['redistribute']:
            option = entry['protocol']
            cmd = 'redistribute %s' % entry['protocol']
            # Only these protocols carry a process/instance id on IOS-XR.
            if entry['id'] and entry['protocol'] in ('ospf', 'eigrp', 'isis', 'ospfv3'):
                cmd += ' %s' % entry['id']
                option += ' %s' % entry['id']
            if entry['metric']:
                cmd += ' metric %s' % entry['metric']
            if entry['route_map']:
                cmd += ' route-policy %s' % entry['route_map']
            if not config or cmd not in config:
                commands.append(cmd)
            safe_list.append(option)
        if self.params['operation'] == 'replace':
            if config:
                # Capture "<proto> <optional id>" pairs and normalize them
                # to the same 'proto[ id]' form used in safe_list.
                matches = re.findall(r'redistribute (\S+)(?:\s*)(\d*)', config, re.M)
                for i in range(0, len(matches)):
                    matches[i] = ' '.join(matches[i]).strip()
                for entry in set(matches).difference(safe_list):
                    commands.append('no redistribute %s' % entry)
        return commands
| gpl-3.0 |
yanheven/horizon | openstack_dashboard/urls.py | 56 | 1979 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls.static import static # noqa
from django.conf.urls import url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
import horizon
# Root URLconf: splash page, the REST API, then everything Horizon exposes.
urlpatterns = patterns(
    '',
    url(r'^$', 'openstack_dashboard.views.splash', name='splash'),
    url(r'^api/', include('openstack_dashboard.api.rest.urls')),
    url(r'', include(horizon.urls)),
)
# Pluggable auth: each entry in AUTHENTICATION_URLS is mounted under /auth/.
for u in getattr(settings, 'AUTHENTICATION_URLS', ['openstack_auth.urls']):
    urlpatterns += patterns(
        '',
        url(r'^auth/', include(u))
    )
# Development static app and project media serving using the staticfiles app.
urlpatterns += staticfiles_urlpatterns()
# Convenience function for serving user-uploaded media during
# development. Only active if DEBUG==True and the URL prefix is a local
# path. Production media should NOT be served by Django.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Expose the 500 handler directly so it can be previewed in debug mode.
    urlpatterns += patterns(
        '',
        url(r'^500/$', 'django.views.defaults.server_error')
    )
| apache-2.0 |
edxzw/edx-platform | lms/djangoapps/courseware/tests/test_masquerade.py | 73 | 15133 | """
Unit tests for masquerade.
"""
import json
import pickle
from mock import patch
from nose.plugins.attrib import attr
from datetime import datetime
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.timezone import UTC
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.masquerade import (
CourseMasquerade,
MasqueradingKeyValueStore,
handle_ajax,
setup_masquerade,
get_masquerading_group_info
)
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase, get_request_for_user
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from student.tests.factories import UserFactory
from xblock.runtime import DictKeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from xmodule.partitions.partitions import Group, UserPartition
class MasqueradeTestCase(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Base class for masquerade tests that sets up a test course and enrolls a user in the course.

    Builds a course -> chapter -> sequential -> vertical -> problem tree,
    then creates and logs in the user returned by create_user() (defined
    by subclasses) and enrolls it in the course.
    """
    def setUp(self):
        super(MasqueradeTestCase, self).setUp()
        # By default, tests run with DISABLE_START_DATES=True. To test that masquerading as a student is
        # working properly, we must use start dates and set a start date in the past (otherwise the access
        # checks exist prematurely).
        self.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC())})
        self.chapter = ItemFactory.create(
            parent_location=self.course.location,
            category="chapter",
            display_name="Test Section",
        )
        self.sequential_display_name = "Test Masquerade Subsection"
        self.sequential = ItemFactory.create(
            parent_location=self.chapter.location,
            category="sequential",
            display_name=self.sequential_display_name,
        )
        self.vertical = ItemFactory.create(
            parent_location=self.sequential.location,
            category="vertical",
            display_name="Test Unit",
        )
        # A two-input option problem whose correct answer is 'Correct'.
        problem_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=2,
            weight=2,
            options=['Correct', 'Incorrect'],
            correct_option='Correct'
        )
        self.problem_display_name = "TestMasqueradeProblem"
        self.problem = ItemFactory.create(
            parent_location=self.vertical.location,
            category='problem',
            data=problem_xml,
            display_name=self.problem_display_name
        )
        self.test_user = self.create_user()
        self.login(self.test_user.email, 'test')
        self.enroll(self.course, True)
    def get_courseware_page(self):
        """
        Returns the server response for the courseware page.
        """
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(self.course.id),
                'chapter': self.chapter.location.name,
                'section': self.sequential.location.name,
            }
        )
        return self.client.get(url)
    def _create_mock_json_request(self, user, body, method='POST', session=None):
        """
        Returns a mock JSON request for the specified user
        """
        request = get_request_for_user(user)
        request.method = method
        # NOTE(review): CONTENT_TYPE is set to a list here rather than a
        # plain string -- confirm downstream consumers expect that.
        request.META = {'CONTENT_TYPE': ['application/json']}
        request.body = body
        request.session = session or {}
        return request
    def verify_staff_debug_present(self, staff_debug_expected):
        """
        Verifies that the staff debug control visibility is as expected (for staff only).
        """
        content = self.get_courseware_page().content
        self.assertTrue(self.sequential_display_name in content, "Subsection should be visible")
        self.assertEqual(staff_debug_expected, 'Staff Debug Info' in content)
    def get_problem(self):
        """
        Returns the JSON content for the problem in the course.
        """
        problem_url = reverse(
            'xblock_handler',
            kwargs={
                'course_id': unicode(self.course.id),
                'usage_id': unicode(self.problem.location),
                'handler': 'xmodule_handler',
                'suffix': 'problem_get'
            }
        )
        return self.client.get(problem_url)
    def verify_show_answer_present(self, show_answer_expected):
        """
        Verifies that "Show Answer" is only present when expected (for staff only).
        """
        problem_html = json.loads(self.get_problem().content)['html']
        self.assertTrue(self.problem_display_name in problem_html)
        self.assertEqual(show_answer_expected, "Show Answer" in problem_html)
@attr('shard_1')
class NormalStudentVisibilityTest(MasqueradeTestCase):
    """
    Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
    """
    def create_user(self):
        """
        Creates a normal student user.
        """
        return UserFactory()
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_not_visible(self):
        """
        Tests that staff debug control is not present for a student.
        """
        self.verify_staff_debug_present(False)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_not_visible(self):
        """
        Tests that "Show Answer" is not visible for a student.
        """
        self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
    """
    Base class for tests of the masquerade behavior for a staff member.
    """
    def create_user(self):
        """
        Creates a staff user.
        """
        return StaffFactory(course_key=self.course.id)
    def update_masquerade(self, role, group_id=None, user_name=None):
        """
        Toggle masquerade state.

        Posts the desired role (and optional group/user) to the
        masquerade endpoint and asserts the request succeeded.
        """
        masquerade_url = reverse(
            'masquerade_update',
            kwargs={
                'course_key_string': unicode(self.course.id),
            }
        )
        response = self.client.post(
            masquerade_url,
            json.dumps({"role": role, "group_id": group_id, "user_name": user_name}),
            "application/json"
        )
        self.assertEqual(response.status_code, 200)
        return response
@attr('shard_1')
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as student.

    Each test toggles masquerade on and back off, asserting the
    staff-only UI disappears and reappears accordingly.
    """
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_with_masquerade(self):
        """
        Tests that staff debug control is not visible when masquerading as a student.
        """
        # Verify staff initially can see staff debug
        self.verify_staff_debug_present(True)
        # Toggle masquerade to student
        self.update_masquerade(role='student')
        self.verify_staff_debug_present(False)
        # Toggle masquerade back to staff
        self.update_masquerade(role='staff')
        self.verify_staff_debug_present(True)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_for_staff(self):
        """
        Tests that "Show Answer" is not visible when masquerading as a student.
        """
        # Verify that staff initially can see "Show Answer".
        self.verify_show_answer_present(True)
        # Toggle masquerade to student
        self.update_masquerade(role='student')
        self.verify_show_answer_present(False)
        # Toggle masquerade back to staff
        self.update_masquerade(role='staff')
        self.verify_show_answer_present(True)
@attr('shard_1')
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
    """
    Check for staff being able to masquerade as a specific student.
    """
    def setUp(self):
        super(TestStaffMasqueradeAsSpecificStudent, self).setUp()
        # Create and enroll a real student account whose state the staff
        # member will masquerade into.
        self.student_user = self.create_user()
        self.login_student()
        self.enroll(self.course, True)

    def login_staff(self):
        """ Login as a staff user """
        self.login(self.test_user.email, 'test')

    def login_student(self):
        """ Login as a student """
        self.login(self.student_user.email, 'test')

    def submit_answer(self, response1, response2):
        """
        Submit an answer to the single problem in our test course.
        """
        return self.submit_question_answer(
            self.problem_display_name,
            {'2_1': response1, '2_2': response2}
        )

    def get_progress_detail(self):
        """
        Return the reported progress detail for the problem in our test course.
        The return value is a string like u'1/2'.
        """
        return json.loads(self.look_at_question(self.problem_display_name).content)['progress_detail']

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_student(self):
        """
        Test masquerading as a specific user.
        We answer the problem in our test course as the student and as staff user, and we use the
        progress as a proxy to determine who's state we currently see.
        """
        # Answer correctly as the student, and check progress.
        self.login_student()
        self.submit_answer('Correct', 'Correct')
        self.assertEqual(self.get_progress_detail(), u'2/2')

        # Log in as staff, and check the problem is unanswered.
        self.login_staff()
        self.assertEqual(self.get_progress_detail(), u'0/2')

        # Masquerade as the student, and check we can see the student state.
        self.update_masquerade(role='student', user_name=self.student_user.username)
        self.assertEqual(self.get_progress_detail(), u'2/2')

        # Temporarily override the student state.
        self.submit_answer('Correct', 'Incorrect')
        self.assertEqual(self.get_progress_detail(), u'1/2')

        # Reload the page and check we see the student state again.
        self.get_courseware_page()
        self.assertEqual(self.get_progress_detail(), u'2/2')

        # Become the staff user again, and check the problem is still unanswered.
        self.update_masquerade(role='staff')
        self.assertEqual(self.get_progress_detail(), u'0/2')

        # Verify the student state did not change.
        self.login_student()
        self.assertEqual(self.get_progress_detail(), u'2/2')
@attr('shard_1')
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as belonging to a group.
    """
    def setUp(self):
        super(TestGetMasqueradingGroupId, self).setUp()
        # Attach a two-group, cohort-scheme user partition to the course so
        # there is a group for the staff member to masquerade into.
        self.user_partition = UserPartition(
            0, 'Test User Partition', '',
            [Group(0, 'Group 1'), Group(1, 'Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(self.user_partition)
        modulestore().update_item(self.course, self.test_user.id)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_group_masquerade(self):
        """
        Tests that a staff member can masquerade as being in a particular group.
        """
        # Verify that there is no masquerading group initially
        group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
        self.assertIsNone(group_id)
        self.assertIsNone(user_partition_id)

        # Install a masquerading group
        # NOTE(review): _create_mock_json_request presumably comes from a base
        # test class not visible here -- verify against MasqueradeTestCase.
        request = self._create_mock_json_request(
            self.test_user,
            body='{"role": "student", "user_partition_id": 0, "group_id": 1}'
        )
        handle_ajax(request, unicode(self.course.id))
        setup_masquerade(request, self.test_user, True)

        # Verify that the masquerading group is returned
        group_id, user_partition_id = get_masquerading_group_info(self.test_user, self.course.id)
        self.assertEqual(group_id, 1)
        self.assertEqual(user_partition_id, 0)
class ReadOnlyKeyValueStore(DictKeyValueStore):
    """
    A KeyValueStore that raises an exception on attempts to modify it.

    Used to make sure MasqueradingKeyValueStore does not try to modify the
    underlying KeyValueStore.
    """
    # NOTE: these guards raise AssertionError explicitly rather than using
    # `assert False`, because assert statements are stripped when Python runs
    # with optimizations enabled (-O / PYTHONOPTIMIZE), which would silently
    # disable the read-only protection. The exception type and message are
    # unchanged, so existing callers/tests that expect AssertionError still pass.

    def set(self, key, value):
        """Reject any attempt to write a value."""
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def delete(self, key):
        """Reject any attempt to delete a value."""
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def set_many(self, update_dict):  # pylint: disable=unused-argument
        """Reject any attempt to write multiple values."""
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")
class FakeSession(dict):
    """Stand-in for a Django session: dict semantics plus a writable `modified` flag."""
    # Tests only need to read and assign this attribute on top of the
    # ordinary mapping interface, so a plain class attribute suffices.
    modified = False
class MasqueradingKeyValueStoreTest(TestCase):
    """
    Unit tests for the MasqueradingKeyValueStore class.
    """
    def setUp(self):
        super(MasqueradingKeyValueStoreTest, self).setUp()
        # A read-only backing store guarantees that any write the wrapper
        # tried to pass through to the real store would trip an assertion.
        self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
        self.session = FakeSession()
        self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)

    def test_all(self):
        # Reads initially fall through to the underlying (read-only) store.
        self.assertEqual(self.kvs.get('a'), 42)
        self.assertEqual(self.kvs.get('b'), None)
        self.assertEqual(self.kvs.get('c'), 'OpenCraft')
        with self.assertRaises(KeyError):
            self.kvs.get('d')
        self.assertTrue(self.kvs.has('a'))
        self.assertTrue(self.kvs.has('b'))
        self.assertTrue(self.kvs.has('c'))
        self.assertFalse(self.kvs.has('d'))
        # Writes must be absorbed by the wrapper (the read-only backing store
        # would raise if they reached it) and must shadow existing values.
        self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
        self.kvs.set('b', 7)
        self.assertEqual(self.kvs.get('a'), 'Norwegian Blue')
        self.assertEqual(self.kvs.get('b'), 7)
        self.assertEqual(self.kvs.get('c'), 'OpenCraft')
        self.assertEqual(self.kvs.get('d'), 'Giraffe')
        # Deleting a key removes it from the wrapper's view entirely, even for
        # keys ('a', 'b') that still exist in the underlying store.
        for key in 'abd':
            self.assertTrue(self.kvs.has(key))
            self.kvs.delete(key)
            with self.assertRaises(KeyError):
                self.kvs.get(key)
        # An untouched key is still readable from the underlying store.
        self.assertEqual(self.kvs.get('c'), 'OpenCraft')
class CourseMasqueradeTest(TestCase):
    """
    Unit tests for the CourseMasquerade class.
    """
    def test_unpickling_sets_all_attributes(self):
        """
        Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
        the session.
        """
        cmasq = CourseMasquerade(7)
        # Simulate a pickle written by an older version of the code in which
        # the 'user_name' attribute did not exist yet.
        del cmasq.user_name
        pickled_cmasq = pickle.dumps(cmasq)
        unpickled_cmasq = pickle.loads(pickled_cmasq)
        # Unpickling is expected to backfill the missing attribute with None.
        self.assertEqual(unpickled_cmasq.user_name, None)
| agpl-3.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/utils/tests/test_weighted_mode.py | 3 | 1095 | import numpy as np
from nose.tools import assert_true
from sklearn.utils.extmath import weighted_mode
from scipy import stats
def test_uniform_weights():
    # With every weight equal to one, weighted_mode must agree exactly with
    # the unweighted scipy.stats.mode result, along every axis choice.
    rng = np.random.RandomState(0)
    x = rng.randint(10, size=(10, 5))
    weights = np.ones(x.shape)

    for axis in (None, 0, 1):
        expected_mode, expected_score = stats.mode(x, axis)
        actual_mode, actual_score = weighted_mode(x, weights, axis)
        assert_true(np.all(actual_mode == expected_mode))
        assert_true(np.all(actual_score == expected_score))
def test_random_weights():
    # Build rows whose weighted mode is forced to be `expected` (6): the
    # first five entries of every row hold the value 6 and get an extra unit
    # of weight, so their combined weight dominates every other value in the
    # row, making the score easy to reproduce exactly.
    expected = 6
    rng = np.random.RandomState(0)
    x = rng.randint(expected, size=(100, 10))
    w = rng.random_sample(x.shape)
    x[:, :5] = expected
    w[:, :5] += 1

    mode, score = weighted_mode(x, w, axis=1)
    assert_true(np.all(mode == expected))
    assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
if __name__ == '__main__':
    # Allow running this test module directly via the nose test runner.
    import nose
    nose.runmodule()
| agpl-3.0 |
tpsatish95/Python-Workshop | Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/distutils/cmd.py | 97 | 19147 | """distutils.cmd
Provides the Command class, the base class for the command classes
in the distutils.command package.
"""
import sys, os, re
from distutils.errors import DistutilsOptionError
from distutils import util, dir_util, file_util, archive_util, dep_util
from distutils import log
class Command:
    """Abstract base class for defining command classes, the "worker bees"
    of the Distutils.  A useful analogy for command classes is to think of
    them as subroutines with local variables called "options".  The options
    are "declared" in 'initialize_options()' and "defined" (given their
    final values, aka "finalized") in 'finalize_options()', both of which
    must be defined by every command class.  The distinction between the
    two is necessary because option values might come from the outside
    world (command line, config file, ...), and any options dependent on
    other options must be computed *after* these outside influences have
    been processed -- hence 'finalize_options()'.  The "body" of the
    subroutine, where it does all its work based on the values of its
    options, is the 'run()' method, which must also be implemented by every
    command class.
    """

    # 'sub_commands' formalizes the notion of a "family" of commands,
    # eg. "install" as the parent with sub-commands "install_lib",
    # "install_headers", etc.  The parent of a family of commands
    # defines 'sub_commands' as a class attribute; it's a list of
    #    (command_name : string, predicate : unbound_method | string | None)
    # tuples, where 'predicate' is a method of the parent command that
    # determines whether the corresponding command is applicable in the
    # current situation.  (Eg. we "install_headers" is only applicable if
    # we have any C header files to install.)  If 'predicate' is None,
    # that command is always applicable.
    #
    # 'sub_commands' is usually defined at the *end* of a class, because
    # predicates can be unbound methods, so they must already have been
    # defined.  The canonical example is the "install" command.
    sub_commands = []

    # -- Creation/initialization methods -------------------------------

    def __init__(self, dist):
        """Create and initialize a new Command object.  Most importantly,
        invokes the 'initialize_options()' method, which is the real
        initializer and depends on the actual command being
        instantiated.
        """
        # late import because of mutual dependence between these classes
        from distutils.dist import Distribution

        if not isinstance(dist, Distribution):
            raise TypeError("dist must be a Distribution instance")
        if self.__class__ is Command:
            raise RuntimeError("Command is an abstract class")

        self.distribution = dist
        self.initialize_options()

        # Per-command versions of the global flags, so that the user can
        # customize Distutils' behaviour command-by-command and let some
        # commands fall back on the Distribution's behaviour.  None means
        # "not defined, check self.distribution's copy", while 0 or 1 mean
        # false and true (duh).  Note that this means figuring out the real
        # value of each flag is a touch complicated -- hence "self._dry_run"
        # will be handled by __getattr__, below.
        # XXX This needs to be fixed.
        self._dry_run = None

        # verbose is largely ignored, but needs to be set for
        # backwards compatibility (I think)?
        self.verbose = dist.verbose

        # Some commands define a 'self.force' option to ignore file
        # timestamps, but methods defined *here* assume that
        # 'self.force' exists for all commands.  So define it here
        # just to be safe.
        self.force = None

        # The 'help' flag is just used for command-line parsing, so
        # none of that complicated bureaucracy is needed.
        self.help = 0

        # 'finalized' records whether or not 'finalize_options()' has been
        # called.  'finalize_options()' itself should not pay attention to
        # this flag: it is the business of 'ensure_finalized()', which
        # always calls 'finalize_options()', to respect/update it.
        self.finalized = 0

    # XXX A more explicit way to customize dry_run would be better.
    def __getattr__(self, attr):
        # 'dry_run' is special: fall back to the Distribution's value unless
        # this command has explicitly overridden it (stored in '_dry_run').
        if attr == 'dry_run':
            myval = getattr(self, "_" + attr)
            if myval is None:
                return getattr(self.distribution, attr)
            else:
                return myval
        else:
            raise AttributeError(attr)

    def ensure_finalized(self):
        """Run 'finalize_options()' if it has not been run yet, and record
        that it has been run.
        """
        if not self.finalized:
            self.finalize_options()
        self.finalized = 1

    # Subclasses must define:
    #   initialize_options()
    #     provide default values for all options; may be customized by
    #     setup script, by options from config file(s), or by command-line
    #     options
    #   finalize_options()
    #     decide on the final values for all options; this is called
    #     after all possible intervention from the outside world
    #     (command-line, option file, etc.) has been processed
    #   run()
    #     run the command: do whatever it is we're here to do,
    #     controlled by the command's various option values

    def initialize_options(self):
        """Set default values for all the options that this command
        supports.  Note that these defaults may be overridden by other
        commands, by the setup script, by config files, or by the
        command-line.  Thus, this is not the place to code dependencies
        between options; generally, 'initialize_options()' implementations
        are just a bunch of "self.foo = None" assignments.

        This method must be implemented by all command classes.
        """
        raise RuntimeError("abstract method -- subclass %s must override"
                           % self.__class__)

    def finalize_options(self):
        """Set final values for all the options that this command supports.
        This is always called as late as possible, ie.  after any option
        assignments from the command-line or from other commands have been
        done.  Thus, this is the place to code option dependencies: if
        'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
        long as 'foo' still has the same value it was assigned in
        'initialize_options()'.

        This method must be implemented by all command classes.
        """
        raise RuntimeError("abstract method -- subclass %s must override"
                           % self.__class__)

    def dump_options(self, header=None, indent=""):
        """Announce each of this command's option values (for debugging)."""
        from distutils.fancy_getopt import longopt_xlate
        if header is None:
            header = "command options for '%s':" % self.get_command_name()
        self.announce(indent + header, level=log.INFO)
        indent = indent + "  "
        for (option, _, _) in self.user_options:
            # Long options use '-' on the command line but '_' as attributes.
            option = option.translate(longopt_xlate)
            if option[-1] == "=":
                option = option[:-1]
            value = getattr(self, option)
            self.announce(indent + "%s = %s" % (option, value),
                          level=log.INFO)

    def run(self):
        """A command's raison d'etre: carry out the action it exists to
        perform, controlled by the options initialized in
        'initialize_options()', customized by other commands, the setup
        script, the command-line, and config files, and finalized in
        'finalize_options()'.  All terminal output and filesystem
        interaction should be done by 'run()'.

        This method must be implemented by all command classes.
        """
        raise RuntimeError("abstract method -- subclass %s must override"
                           % self.__class__)

    def announce(self, msg, level=1):
        """If the current verbosity level is of greater than or equal to
        'level' print 'msg' to stdout.
        """
        log.log(level, msg)

    def debug_print(self, msg):
        """Print 'msg' to stdout if the global DEBUG (taken from the
        DISTUTILS_DEBUG environment variable) flag is true.
        """
        from distutils.debug import DEBUG
        if DEBUG:
            print(msg)
            sys.stdout.flush()

    # -- Option validation methods -------------------------------------
    # (these are very handy in writing the 'finalize_options()' method)
    #
    # NB. the general philosophy here is to ensure that a particular option
    # value meets certain type and value constraints.  If not, we try to
    # force it into conformance (eg. if we expect a list but have a string,
    # split the string on comma and/or whitespace).  If we can't force the
    # option into conformance, raise DistutilsOptionError.  Thus, command
    # classes need do nothing more than (eg.)
    #   self.ensure_string_list('foo')
    # and they can be guaranteed that thereafter, self.foo will be
    # a list of strings.

    def _ensure_stringlike(self, option, what, default=None):
        # Shared helper: return the option's value, substituting 'default'
        # for None, and raising if the value is not a string.
        val = getattr(self, option)
        if val is None:
            setattr(self, option, default)
            return default
        elif not isinstance(val, str):
            raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
                                       % (option, what, val))
        return val

    def ensure_string(self, option, default=None):
        """Ensure that 'option' is a string; if not defined, set it to
        'default'.
        """
        self._ensure_stringlike(option, "string", default)

    def ensure_string_list(self, option):
        """Ensure that 'option' is a list of strings.  If 'option' is
        currently a string, we split it either on /,\s*/ or /\s+/, so
        "foo bar baz", "foo,bar,baz", and "foo,   bar baz" all become
        ["foo", "bar", "baz"].
        """
        val = getattr(self, option)
        if val is None:
            return
        elif isinstance(val, str):
            setattr(self, option, re.split(r',\s*|\s+', val))
        else:
            if isinstance(val, list):
                ok = all(isinstance(v, str) for v in val)
            else:
                ok = False
            if not ok:
                raise DistutilsOptionError(
                    "'%s' must be a list of strings (got %r)"
                    % (option, val))

    def _ensure_tested_string(self, option, tester, what, error_fmt,
                              default=None):
        # Shared helper: validate a string-valued option with 'tester'
        # (e.g. os.path.isfile), raising with 'error_fmt' on failure.
        val = self._ensure_stringlike(option, what, default)
        if val is not None and not tester(val):
            raise DistutilsOptionError(("error in '%s' option: " + error_fmt)
                                       % (option, val))

    def ensure_filename(self, option):
        """Ensure that 'option' is the name of an existing file."""
        self._ensure_tested_string(option, os.path.isfile,
                                   "filename",
                                   "'%s' does not exist or is not a file")

    def ensure_dirname(self, option):
        """Ensure that 'option' is the name of an existing directory."""
        self._ensure_tested_string(option, os.path.isdir,
                                   "directory name",
                                   "'%s' does not exist or is not a directory")

    # -- Convenience methods for commands ------------------------------

    def get_command_name(self):
        """Return the command's name: 'command_name' if set, else the class name."""
        if hasattr(self, 'command_name'):
            return self.command_name
        else:
            return self.__class__.__name__

    def set_undefined_options(self, src_cmd, *option_pairs):
        """Set the values of any "undefined" options from corresponding
        option values in some other command object.  "Undefined" here means
        "is None", which is the convention used to indicate that an option
        has not been changed between 'initialize_options()' and
        'finalize_options()'.  Usually called from 'finalize_options()' for
        options that depend on some other command rather than another
        option of the same command.  'src_cmd' is the other command from
        which option values will be taken (a command object will be created
        for it if necessary); the remaining arguments are
        '(src_option,dst_option)' tuples which mean "take the value of
        'src_option' in the 'src_cmd' command object, and copy it to
        'dst_option' in the current command object".
        """
        # Option_pairs: list of (src_option, dst_option) tuples
        src_cmd_obj = self.distribution.get_command_obj(src_cmd)
        src_cmd_obj.ensure_finalized()
        for (src_option, dst_option) in option_pairs:
            if getattr(self, dst_option) is None:
                setattr(self, dst_option, getattr(src_cmd_obj, src_option))

    def get_finalized_command(self, command, create=1):
        """Wrapper around Distribution's 'get_command_obj()' method: find
        (create if necessary and 'create' is true) the command object for
        'command', call its 'ensure_finalized()' method, and return the
        finalized command object.
        """
        cmd_obj = self.distribution.get_command_obj(command, create)
        cmd_obj.ensure_finalized()
        return cmd_obj

    # XXX rename to 'get_reinitialized_command()'? (should do the
    # same in dist.py, if so)
    def reinitialize_command(self, command, reinit_subcommands=0):
        return self.distribution.reinitialize_command(command,
                                                      reinit_subcommands)

    def run_command(self, command):
        """Run some other command: uses the 'run_command()' method of
        Distribution, which creates and finalizes the command object if
        necessary and then invokes its 'run()' method.
        """
        self.distribution.run_command(command)

    def get_sub_commands(self):
        """Determine the sub-commands that are relevant in the current
        distribution (ie., that need to be run).  This is based on the
        'sub_commands' class attribute: each tuple in that list may include
        a method that we call to determine if the subcommand needs to be
        run for the current distribution.  Return a list of command names.
        """
        commands = []
        for (cmd_name, method) in self.sub_commands:
            # A None predicate means the sub-command is always applicable.
            if method is None or method(self):
                commands.append(cmd_name)
        return commands

    # -- External world manipulation -----------------------------------

    def warn(self, msg):
        """Log a warning message prefixed with this command's name."""
        log.warn("warning: %s: %s\n" %
                 (self.get_command_name(), msg))

    def execute(self, func, args, msg=None, level=1):
        """Invoke 'func(*args)' via distutils.util, respecting dry-run."""
        util.execute(func, args, msg, dry_run=self.dry_run)

    def mkpath(self, name, mode=0o777):
        """Create a directory (and missing ancestors), respecting dry-run."""
        dir_util.mkpath(name, mode, dry_run=self.dry_run)

    def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1,
                  link=None, level=1):
        """Copy a file respecting verbose, dry-run and force flags.  (The
        former two default to whatever is in the Distribution object, and
        the latter defaults to false for commands that don't define it.)"""
        return file_util.copy_file(infile, outfile, preserve_mode,
                                   preserve_times, not self.force, link,
                                   dry_run=self.dry_run)

    def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1,
                  preserve_symlinks=0, level=1):
        """Copy an entire directory tree respecting verbose, dry-run,
        and force flags.
        """
        return dir_util.copy_tree(infile, outfile, preserve_mode,
                                  preserve_times, preserve_symlinks,
                                  not self.force, dry_run=self.dry_run)

    def move_file (self, src, dst, level=1):
        """Move a file respecting dry-run flag."""
        return file_util.move_file(src, dst, dry_run=self.dry_run)

    def spawn(self, cmd, search_path=1, level=1):
        """Spawn an external command respecting dry-run flag."""
        from distutils.spawn import spawn
        spawn(cmd, search_path, dry_run=self.dry_run)

    def make_archive(self, base_name, format, root_dir=None, base_dir=None,
                     owner=None, group=None):
        """Create an archive (zip/tar/...) via distutils.archive_util,
        respecting the dry-run flag.
        """
        return archive_util.make_archive(base_name, format, root_dir, base_dir,
                                         dry_run=self.dry_run,
                                         owner=owner, group=group)

    def make_file(self, infiles, outfile, func, args,
                  exec_msg=None, skip_msg=None, level=1):
        """Special case of 'execute()' for operations that process one or
        more input files and generate one output file.  Works just like
        'execute()', except the operation is skipped and a different
        message printed if 'outfile' already exists and is newer than all
        files listed in 'infiles'.  If the command defined 'self.force',
        and it is true, then the command is unconditionally run -- does no
        timestamp checks.
        """
        if skip_msg is None:
            skip_msg = "skipping %s (inputs unchanged)" % outfile

        # Allow 'infiles' to be a single string
        if isinstance(infiles, str):
            infiles = (infiles,)
        elif not isinstance(infiles, (list, tuple)):
            raise TypeError(
                "'infiles' must be a string, or a list or tuple of strings")

        if exec_msg is None:
            exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles))

        # If 'outfile' must be regenerated (either because it doesn't
        # exist, is out-of-date, or the 'force' flag is true) then
        # perform the action that presumably regenerates it
        if self.force or dep_util.newer_group(infiles, outfile):
            self.execute(func, args, exec_msg, level)
        # Otherwise, print the "skip" message
        else:
            log.debug(skip_msg)
# XXX 'install_misc' class not currently used -- it was the base class for
# both 'install_scripts' and 'install_data', but they outgrew it. It might
# still be useful for 'install_headers', though, so I'm keeping it around
# for the time being.
class install_misc(Command):
    """Common base class for installing some files in a subdirectory.
    Currently used by install_data and install_scripts.
    """

    user_options = [('install-dir=', 'd', "directory to install the files to")]

    def initialize_options(self):
        self.install_dir = None
        self.outfiles = []

    def _install_dir_from(self, dirname):
        # Inherit the destination directory from the parent 'install' command.
        self.set_undefined_options('install', (dirname, 'install_dir'))

    def _copy_files(self, filelist):
        # Copy every file into the install directory, recording each
        # destination path in 'outfiles' for later 'get_outputs()' queries.
        self.outfiles = []
        if not filelist:
            return
        self.mkpath(self.install_dir)
        for filename in filelist:
            self.copy_file(filename, self.install_dir)
            self.outfiles.append(os.path.join(self.install_dir, filename))

    def get_outputs(self):
        """Return the list of files written by the last '_copy_files()' call."""
        return self.outfiles
| apache-2.0 |
nicjhan/MOM6-examples | land_ice_ocean_LM3_SIS2/OM_360x320_C180/preprocessing/changeChar.py | 11 | 1563 | #!/usr/bin/env python
def error(msg,code=9):
    # Print a diagnostic message and terminate the process with the given
    # exit code (default 9). Python 2 script: uses the print statement.
    print 'Error: ' + msg
    exit(code)
# Imports
try: import argparse
except: error('This version of python is not new enough. python 2.7 or newer is required.')
try: from netCDF4 import Dataset, stringtochar
except: error('Unable to import netCDF4 module. Check your PYTHONPATH.\n'
+'Perhaps try:\n module load python_netcdf4')
try: import numpy as np
except: error('Unable to import numpy module. Check your PYTHONPATH.\n'
+'Perhaps try:\n module load python_numpy')
def main():
    # Parse command-line arguments, then rewrite the named char variable in
    # the given netcdf file in place.
    # Command line arguments
    parser = argparse.ArgumentParser(description=
        'Changes the value of a named char variable in a netcdf file.',
        epilog='Written by A.Adcroft, 2013.')
    parser.add_argument('filename', type=str,
                        help='netcdf file to modify.')
    parser.add_argument('variable', type=str,
                        help='Name of char variable to change.')
    parser.add_argument('value', type=str,
                        help='Contents to change string to.')
    optCmdLineArgs = parser.parse_args()

    # Open in append ('a') mode so only the one variable is rewritten in place.
    rg = Dataset(optCmdLineArgs.filename, 'a' );
    if optCmdLineArgs.variable in rg.variables:
        var = rg.variables[optCmdLineArgs.variable]
        # Build a 1-element fixed-width string array sized to the variable's
        # length, then convert it to a char array for storage in the file.
        # NOTE(review): values longer than len(var) are presumably truncated
        # by the fixed-width dtype -- confirm desired behaviour.
        dat = np.empty(1,'S'+repr(len(var)))
        dat[0] = optCmdLineArgs.value
        dc = stringtochar(dat)
        var[:] = dc
    else: error('"'+optCmdLineArgs.variable+'" was not found in "'+optCmdLineArgs.filename+'".')
    rg.close()
    print 'File "%s" updated.'%(optCmdLineArgs.filename)
# Invoke main() only when run as a script (not when imported).
if __name__ == '__main__': main()
| gpl-3.0 |
sotdjin/glibglab | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ration = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
# Threshold on the typical-vs-random distribution ratio used by the EUC-KR
# distribution analyser (see the sampling notes in the comments above).
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0

# Number of entries in the EUCKRCharToFreqOrder table defined below.
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| mit |
kamalx/edx-platform | common/djangoapps/track/tests/test_shim.py | 111 | 4737 | """Ensure emitted events contain the fields legacy processors expect to find."""
from mock import sentinel
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_events_equal
from track.tests import EventTrackingTestCase, FROZEN_TIME
# Processor chain that rewrites events into the legacy tracking-log format.
LEGACY_SHIM_PROCESSOR = [
    {
        'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
    }
]

# Processor chain that shapes events for forwarding to Google Analytics.
GOOGLE_ANALYTICS_PROCESSOR = [
    {
        'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
    }
]
@override_settings(
    EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
)
class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase):
    """Ensure emitted events contain the fields legacy processors expect to find."""

    def test_event_field_mapping(self):
        payload = {sentinel.key: sentinel.value}

        full_context = {
            'accept_language': sentinel.accept_language,
            'referer': sentinel.referer,
            'username': sentinel.username,
            'session': sentinel.session,
            'ip': sentinel.ip,
            'host': sentinel.host,
            'agent': sentinel.agent,
            'path': sentinel.path,
            'user_id': sentinel.user_id,
            'course_id': sentinel.course_id,
            'org_id': sentinel.org_id,
            'client_id': sentinel.client_id,
        }
        with self.tracker.context('test', full_context):
            self.tracker.emit(sentinel.name, payload)

        actual_event = self.get_event()
        # The shim hoists selected context values to the top level and keeps
        # only a reduced context dict (client_id is dropped entirely).
        expected_event = {
            'accept_language': sentinel.accept_language,
            'referer': sentinel.referer,
            'event_type': sentinel.name,
            'name': sentinel.name,
            'context': {
                'user_id': sentinel.user_id,
                'course_id': sentinel.course_id,
                'org_id': sentinel.org_id,
                'path': sentinel.path,
            },
            'event': payload,
            'username': sentinel.username,
            'event_source': 'server',
            'time': FROZEN_TIME,
            'agent': sentinel.agent,
            'host': sentinel.host,
            'ip': sentinel.ip,
            'page': None,
            'session': sentinel.session,
        }
        assert_events_equal(expected_event, actual_event)

    def test_missing_fields(self):
        # With no context at all, every legacy field falls back to a
        # blank/default value.
        self.tracker.emit(sentinel.name)

        actual_event = self.get_event()
        expected_event = {
            'accept_language': '',
            'referer': '',
            'event_type': sentinel.name,
            'name': sentinel.name,
            'context': {},
            'event': {},
            'username': '',
            'event_source': 'server',
            'time': FROZEN_TIME,
            'agent': '',
            'host': '',
            'ip': '',
            'page': None,
            'session': '',
        }
        assert_events_equal(expected_event, actual_event)
@override_settings(
    EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR,
)
class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
    """Ensure emitted events contain the fields necessary for Google Analytics."""

    def test_event_fields(self):
        """ Test that course_id is added as the label if present, and nonInteraction is set. """
        payload = {sentinel.key: sentinel.value}
        emit_context = {
            'path': sentinel.path,
            'user_id': sentinel.user_id,
            'course_id': sentinel.course_id,
            'org_id': sentinel.org_id,
            'client_id': sentinel.client_id,
        }
        with self.tracker.context('test', emit_context):
            self.tracker.emit(sentinel.name, payload)

        actual_event = self.get_event()
        # The GA processor keeps the context intact, copies course_id into
        # the label, and always flags the event as non-interactive.
        expected_event = {
            'context': emit_context,
            'data': payload,
            'label': sentinel.course_id,
            'name': sentinel.name,
            'nonInteraction': 1,
            'timestamp': FROZEN_TIME,
        }
        assert_events_equal(expected_event, actual_event)

    def test_no_course_id(self):
        """ Test that a label is not added if course_id is not specified, but nonInteraction is still set. """
        payload = {sentinel.key: sentinel.value}
        emit_context = {
            'path': sentinel.path,
            'user_id': sentinel.user_id,
            'client_id': sentinel.client_id,
        }
        with self.tracker.context('test', emit_context):
            self.tracker.emit(sentinel.name, payload)

        actual_event = self.get_event()
        expected_event = {
            'context': emit_context,
            'data': payload,
            'name': sentinel.name,
            'nonInteraction': 1,
            'timestamp': FROZEN_TIME,
        }
        assert_events_equal(expected_event, actual_event)
| agpl-3.0 |
github-account-because-they-want-it/scrapy | scrapy/commands/parse.py | 108 | 8286 | from __future__ import print_function
import logging
from w3lib.url import is_url
from scrapy.commands import ScrapyCommand
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils import display
from scrapy.utils.conf import arglist_to_dict
from scrapy.utils.spider import iterate_spider_output, spidercls_for_request
from scrapy.exceptions import UsageError
logger = logging.getLogger(__name__)
class Command(ScrapyCommand):
    """Fetch a URL with the spider that handles it (or an explicitly named
    spider) and print the scraped items and extracted requests, following
    requests up to ``--depth`` levels deep.
    """

    requires_project = True
    spider = None
    # NOTE(review): these mutable containers live on the class, so they are
    # shared by every Command instance. Scrapy appears to create one command
    # object per process, which is what makes this safe -- confirm before
    # reusing instances.
    items = {}
    requests = {}

    first_response = None

    def syntax(self):
        return "[options] <url>"

    def short_desc(self):
        return "Parse URL (using its spider) and print the results"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("--spider", dest="spider", default=None,
            help="use this spider without looking for one")
        parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
            help="set spider argument (may be repeated)")
        parser.add_option("--pipelines", action="store_true",
            help="process items through pipelines")
        parser.add_option("--nolinks", dest="nolinks", action="store_true",
            help="don't show links to follow (extracted requests)")
        parser.add_option("--noitems", dest="noitems", action="store_true",
            help="don't show scraped items")
        parser.add_option("--nocolour", dest="nocolour", action="store_true",
            help="avoid using pygments to colorize the output")
        parser.add_option("-r", "--rules", dest="rules", action="store_true",
            help="use CrawlSpider rules to discover the callback")
        parser.add_option("-c", "--callback", dest="callback",
            help="use this callback for parsing, instead looking for a callback")
        parser.add_option("-d", "--depth", dest="depth", type="int", default=1,
            help="maximum depth for parsing requests [default: %default]")
        parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
            help="print each depth level one by one")

    @property
    def max_level(self):
        """Deepest depth level for which any items or requests were collected."""
        # list() both views so the concatenation also works on Python 3,
        # where dict.keys() returns a view object that cannot be added
        # with "+".
        levels = list(self.items.keys()) + list(self.requests.keys())
        if levels:
            return max(levels)
        return 0

    def add_items(self, lvl, new_items):
        """Append scraped items to the bucket for depth level ``lvl``."""
        old_items = self.items.get(lvl, [])
        self.items[lvl] = old_items + new_items

    def add_requests(self, lvl, new_reqs):
        """Append extracted requests to the bucket for depth level ``lvl``."""
        old_reqs = self.requests.get(lvl, [])
        self.requests[lvl] = old_reqs + new_reqs

    def print_items(self, lvl=None, colour=True):
        """Pretty-print the items collected at ``lvl`` (all levels if None)."""
        if lvl is None:
            items = [item for lst in self.items.values() for item in lst]
        else:
            items = self.items.get(lvl, [])

        print("# Scraped Items ", "-"*60)
        display.pprint([dict(x) for x in items], colorize=colour)

    def print_requests(self, lvl=None, colour=True):
        """Pretty-print the requests collected at ``lvl`` (deepest level if None)."""
        if lvl is None:
            levels = self.requests.keys()
            if levels:
                requests = self.requests[max(levels)]
            else:
                requests = []
        else:
            requests = self.requests.get(lvl, [])

        print("# Requests ", "-"*65)
        display.pprint(requests, colorize=colour)

    def print_results(self, opts):
        """Print all collected items/requests according to the CLI options."""
        colour = not opts.nocolour

        if opts.verbose:
            # range (not the Python 2-only xrange) so this also runs on
            # Python 3; the level counts are small, so materializing the
            # range on Python 2 costs nothing.
            for level in range(1, self.max_level + 1):
                print('\n>>> DEPTH LEVEL: %s <<<' % level)
                if not opts.noitems:
                    self.print_items(level, colour)
                if not opts.nolinks:
                    self.print_requests(level, colour)
        else:
            print('\n>>> STATUS DEPTH LEVEL %s <<<' % self.max_level)
            if not opts.noitems:
                self.print_items(colour=colour)
            if not opts.nolinks:
                self.print_requests(colour=colour)

    def run_callback(self, response, cb):
        """Run callback ``cb`` on ``response`` and split the output into
        (items, requests)."""
        items, requests = [], []

        for x in iterate_spider_output(cb(response)):
            if isinstance(x, (BaseItem, dict)):
                items.append(x)
            elif isinstance(x, Request):
                requests.append(x)

        return items, requests

    def get_callback_from_rules(self, spider, response):
        """Return the callback of the first CrawlSpider rule matching the
        response URL, or log an error if the spider has no rules."""
        if getattr(spider, 'rules', None):
            for rule in spider.rules:
                if rule.link_extractor.matches(response.url) and rule.callback:
                    return rule.callback
        else:
            logger.error('No CrawlSpider rules found in spider %(spider)r, '
                         'please specify a callback to use for parsing',
                         {'spider': spider.name})

    def set_spidercls(self, url, opts):
        """Resolve the spider class to use (named or URL-matched) and make
        its start_requests yield exactly one instrumented request."""
        spider_loader = self.crawler_process.spider_loader
        if opts.spider:
            try:
                self.spidercls = spider_loader.load(opts.spider)
            except KeyError:
                logger.error('Unable to find spider: %(spider)s',
                             {'spider': opts.spider})
        else:
            self.spidercls = spidercls_for_request(spider_loader, Request(url))
            if not self.spidercls:
                logger.error('Unable to find spider for: %(url)s',
                             {'url': url})

        request = Request(url, opts.callback)
        _start_requests = lambda s: [self.prepare_request(s, request, opts)]
        self.spidercls.start_requests = _start_requests

    def start_parsing(self, url, opts):
        """Run the crawl synchronously and record the crawler used."""
        self.crawler_process.crawl(self.spidercls, **opts.spargs)
        self.pcrawler = list(self.crawler_process.crawlers)[0]
        self.crawler_process.start()

        if not self.first_response:
            logger.error('No response downloaded for: %(url)s',
                         {'url': url})

    def prepare_request(self, spider, request, opts):
        """Wrap ``request``'s callback so every response is intercepted,
        its items/requests recorded per depth, and follow-up requests are
        re-instrumented until ``opts.depth`` is reached."""
        def callback(response):
            # memorize first request
            if not self.first_response:
                self.first_response = response

            # determine real callback
            cb = response.meta['_callback']
            if not cb:
                if opts.rules and self.first_response == response:
                    cb = self.get_callback_from_rules(spider, response)
                else:
                    cb = 'parse'

            if not callable(cb):
                cb_method = getattr(spider, cb, None)
                if callable(cb_method):
                    cb = cb_method
                else:
                    # Log the missing callback name (cb), not this wrapper
                    # function -- the original logged the closure itself,
                    # which made the message useless.
                    logger.error('Cannot find callback %(callback)r in spider: %(spider)s',
                                 {'callback': cb, 'spider': spider.name})
                    return

            # parse items and requests
            depth = response.meta['_depth']

            items, requests = self.run_callback(response, cb)
            if opts.pipelines:
                itemproc = self.pcrawler.engine.scraper.itemproc
                for item in items:
                    itemproc.process_item(item, spider)
            self.add_items(depth, items)
            self.add_requests(depth, requests)

            if depth < opts.depth:
                for req in requests:
                    req.meta['_depth'] = depth + 1
                    req.meta['_callback'] = req.callback
                    req.callback = callback
            return requests

        request.meta['_depth'] = 1
        request.meta['_callback'] = request.callback
        request.callback = callback
        return request

    def process_options(self, args, opts):
        ScrapyCommand.process_options(self, args, opts)
        try:
            opts.spargs = arglist_to_dict(opts.spargs)
        except ValueError:
            raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)

    def run(self, args, opts):
        # parse arguments
        if not len(args) == 1 or not is_url(args[0]):
            raise UsageError()
        else:
            url = args[0]

        # prepare spidercls
        self.set_spidercls(url, opts)

        if self.spidercls and opts.depth > 0:
            self.start_parsing(url, opts)
            self.print_results(opts)
| bsd-3-clause |
Curso-OpenShift/Formulario | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/contrib/sessions/models.py | 347 | 1298 | from __future__ import unicode_literals
from django.contrib.sessions.base_session import (
AbstractBaseSession, BaseSessionManager,
)
class SessionManager(BaseSessionManager):
    """Default manager for Session; included in the migrations history."""
    use_in_migrations = True
class Session(AbstractBaseSession):
    """
    Django provides full support for anonymous sessions. The session
    framework lets you store and retrieve arbitrary data on a
    per-site-visitor basis. It stores data on the server side and
    abstracts the sending and receiving of cookies. Cookies contain a
    session ID -- not the data itself.

    The Django sessions framework is entirely cookie-based. It does
    not fall back to putting session IDs in URLs. This is an intentional
    design decision. Not only does that behavior make URLs ugly, it makes
    your site vulnerable to session-ID theft via the "Referer" header.

    For complete documentation on using Sessions in your code, consult
    the sessions documentation that is shipped with Django (also available
    on the Django Web site).
    """
    objects = SessionManager()

    @classmethod
    def get_session_store_class(cls):
        # Imported lazily to avoid a circular import between this model and
        # the database-backed session store that persists it.
        from django.contrib.sessions.backends.db import SessionStore
        return SessionStore

    class Meta(AbstractBaseSession.Meta):
        db_table = 'django_session'
| gpl-3.0 |
23andMe/ansible-modules-extras | packaging/os/zypper.py | 49 | 10899 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# based on
# openbsd_pkg
# (c) 2013
# Patrik Lundin <patrik.lundin.swe@gmail.com>
#
# yum
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
DOCUMENTATION = '''
---
module: zypper
author:
- "Patrick Callahan (@dirtyharrycallahan)"
- "Alexander Gubin (@alxgu)"
version_added: "1.2"
short_description: Manage packages on SUSE and openSUSE
description:
- Manage packages on SUSE and openSUSE using the zypper and rpm tools.
options:
name:
description:
- package name or package specifier with version C(name) or C(name-1.0). You can also pass a url or a local path to a rpm file.
required: true
aliases: [ 'pkg' ]
state:
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
required: false
choices: [ present, latest, absent ]
default: "present"
type:
description:
- The type of package to be operated on.
required: false
choices: [ package, patch, pattern, product, srcpackage ]
default: "package"
version_added: "2.0"
disable_gpg_check:
description:
- Whether to disable to GPG signature checking of the package
signature being installed. Has an effect only if state is
I(present) or I(latest).
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
disable_recommends:
version_added: "1.8"
description:
- Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages.
required: false
default: "yes"
choices: [ "yes", "no" ]
notes: []
# informational: requirements for nodes
requirements: [ zypper, rpm ]
author: Patrick Callahan
'''
EXAMPLES = '''
# Install "nmap"
- zypper: name=nmap state=present
# Install apache2 with recommended packages
- zypper: name=apache2 state=present disable_recommends=no
# Remove the "nmap" package
- zypper: name=nmap state=absent
# Install the nginx rpm from a remote repo
- zypper: name=http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm state=present
# Install local rpm file
- zypper: name=/tmp/fancy-software.rpm state=present
'''
# Function used for getting zypper version
def zypper_version(module):
    """Return (rc, message) tuple"""
    rc, out, err = module.run_command(['/usr/bin/zypper', '-V'], check_rc=False)
    # On success zypper prints the version on stdout; otherwise report stderr.
    if rc == 0:
        return rc, out
    return rc, err
# Function used for getting versions of currently installed packages.
def get_current_version(m, packages):
    """Return a dict mapping each name in ``packages`` to its installed
    VERSION-RELEASE string, or None when the rpm output cannot be parsed
    or any requested package is missing from it.
    """
    cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n']
    cmd.extend(packages)
    rc, stdout, stderr = m.run_command(cmd, check_rc=False)

    current_version = {}
    # Raw string so the \S escapes are not subject to string-escape
    # deprecation warnings.
    rpmoutput_re = re.compile(r'^(\S+) (\S+)$')
    for stdoutline in stdout.splitlines():
        match = rpmoutput_re.match(stdoutline)
        if match is None:
            return None
        package = match.group(1)
        version = match.group(2)
        current_version[package] = version

    for package in packages:
        if package not in current_version:
            # print() call form works on both Python 2 and 3; the original
            # "print x" statement is a SyntaxError under Python 3.
            print(package + ' was not returned by rpm \n')
            return None

    return current_version
# Function used to find out if a package is currently installed.
def get_package_state(m, packages):
    """Return a dict mapping each package name to True/False installed
    state, or None if rpm output cannot be parsed. Local/remote ``.rpm``
    entries in ``packages`` are replaced in place by the package name
    queried from the rpm file itself.
    """
    for i in range(0, len(packages)):
        # Check state of a local rpm-file
        if ".rpm" in packages[i]:
            # Fail early if a local path does not exist (URLs are handed
            # straight to rpm).
            package = packages[i]
            if not os.path.isfile(package) and '://' not in package:
                stderr = "No Package file matching '%s' found on system" % package
                m.fail_json(msg=stderr)
            # Get packagename from rpm file
            cmd = ['/bin/rpm', '--query', '--qf', '%{NAME}', '--package']
            cmd.append(package)
            rc, stdout, stderr = m.run_command(cmd, check_rc=False)
            packages[i] = stdout

    cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n']
    cmd.extend(packages)
    rc, stdout, stderr = m.run_command(cmd, check_rc=False)

    installed_state = {}
    # Raw string for the regex escapes; matches both "is installed" and
    # "is not installed" rpm output lines.
    rpmoutput_re = re.compile(r'^package (\S+) (.*)$')
    for stdoutline in stdout.splitlines():
        match = rpmoutput_re.match(stdoutline)
        if match is None:
            return None
        package = match.group(1)
        result = match.group(2)
        installed_state[package] = (result == 'is installed')

    for package in packages:
        if package not in installed_state:
            # print() call form works on both Python 2 and 3; the original
            # "print x" statement is a SyntaxError under Python 3.
            print(package + ' was not returned by rpm \n')
            return None

    return installed_state
# Function used to make sure a package is present.
def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
    """Install every package in ``name`` that is not already installed."""
    to_install = [p for p in name if installed_state[p] is False]

    if not to_install:
        # Nothing missing: report success with no change.
        return (0, '', '', False)

    cmd = ['/usr/bin/zypper', '--non-interactive']
    # Global options must precede the zypper sub-command.
    if disable_gpg_check:
        cmd.append('--no-gpg-checks')
    cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type])
    # --no-recommends is only understood by newer zypper releases.
    if disable_recommends and not old_zypper:
        cmd.append('--no-recommends')
    cmd.extend(to_install)
    rc, stdout, stderr = m.run_command(cmd, check_rc=False)

    return (rc, stdout, stderr, rc == 0)
# Function used to make sure a package is the latest available version.
def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
    """Install missing packages, then upgrade everything in ``name``."""
    # first of all, make sure all the packages are installed
    (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper)

    pre_upgrade_versions = {}
    if not changed:
        # Only worth snapshotting versions when the install step was a no-op.
        pre_upgrade_versions = get_current_version(m, name)

    cmd = ['/usr/bin/zypper', '--non-interactive']
    if disable_gpg_check:
        cmd.append('--no-gpg-checks')
    # Old zypper has no "update" verb; fall back to reinstalling.
    verb = 'install' if old_zypper else 'update'
    cmd.extend([verb, '--auto-agree-with-licenses', '-t', package_type])
    cmd.extend(name)
    rc, stdout, stderr = m.run_command(cmd, check_rc=False)

    if not changed and pre_upgrade_versions != get_current_version(m, name):
        changed = True

    return (rc, stdout, stderr, changed)
# Function used to make sure a package is not installed.
def package_absent(m, name, installed_state, package_type, old_zypper):
    """Remove every package in ``name`` that is currently installed."""
    to_remove = [p for p in name if installed_state[p] is True]

    if not to_remove:
        # Nothing installed: report success with no change.
        return (0, '', '', False)

    cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type]
    cmd.extend(to_remove)
    rc, stdout, stderr = m.run_command(cmd)

    return (rc, stdout, stderr, rc == 0)
# ===========================================
# Main control flow
def main():
    """Entry point: parse module arguments, detect zypper capabilities,
    dispatch to the present/absent/latest handler and report the result."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, aliases=['pkg'], type='list'),
            # 'installed'/'removed' are undocumented aliases for
            # 'present'/'absent' (see the dispatch below).
            state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
            type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']),
            disable_gpg_check = dict(required=False, default='no', type='bool'),
            disable_recommends = dict(required=False, default='yes', type='bool'),
        ),
        supports_check_mode = False
    )


    params = module.params

    name  = params['name']
    state = params['state']
    type_ = params['type']
    disable_gpg_check = params['disable_gpg_check']
    disable_recommends = params['disable_recommends']

    rc = 0
    stdout = ''
    stderr = ''
    result = {}
    result['name'] = name
    result['state'] = state

    # Treat any zypper with major version 0 as "old": it lacks the
    # 'update' verb and the --no-recommends install option used by the
    # helpers above.
    rc, out = zypper_version(module)
    match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
    if not match or  int(match.group(1)) > 0:
        old_zypper = False
    else:
        old_zypper = True

    # Get package state
    installed_state = get_package_state(module, name)

    # Perform requested action
    if state in ['installed', 'present']:
        (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)
    elif state in ['absent', 'removed']:
        (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper)
    elif state == 'latest':
        (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)

    if rc != 0:
        # Prefer stderr for the failure message, falling back to stdout.
        if stderr:
            module.fail_json(msg=stderr)
        else:
            module.fail_json(msg=stdout)

    result['changed'] = changed

    module.exit_json(**result)
# import module snippets
# (the star import provides AnsibleModule and the helpers used above)
from ansible.module_utils.basic import *

main()
| gpl-3.0 |
MMTObservatory/camsrv | camsrv/tests/test_apps.py | 1 | 1233 | """
Sanity checks to make sure applications can be instantiated
"""
from tornado.testing import AsyncHTTPTestCase, gen_test
from ..camsrv import CAMsrv
from ..f9wfs import F9WFSsrv
from ..matcam import MATsrv
from ..ratcam import RATsrv
class TestSimSrv(AsyncHTTPTestCase):
    """Smoke test: the generic camera server starts and serves its home page."""

    def get_app(self):
        return CAMsrv(connect=False)

    @gen_test
    def test_homepage(self):
        resp = yield self.http_client.fetch(self.get_url('/'))
        self.assertEqual(200, resp.code)
class TestConnected(AsyncHTTPTestCase):
    """Exercise the status and disconnect endpoints against a connected app."""

    def get_app(self):
        return CAMsrv(connect=True)

    def test_read_then_disconnect(self):
        # Status must report a camera temperature while connected...
        status = self.fetch('/status')
        self.assertEqual(200, status.code)
        self.assertIn(b"temperature", status.body)
        # ...and disconnecting afterwards must succeed.
        self.assertEqual(200, self.fetch('/disconnect').code)
class TestF9Srv(TestSimSrv):
    # Reuse the TestSimSrv smoke test against the F/9 WFS camera server.
    def get_app(self):
        app = F9WFSsrv(connect=False)
        return app
class TestMATSrv(TestSimSrv):
    # Reuse the TestSimSrv smoke test against the MAT camera server.
    def get_app(self):
        app = MATsrv(connect=False)
        return app
class TestRATSrv(TestSimSrv):
    # Reuse the TestSimSrv smoke test against the RAT camera server.
    def get_app(self):
        app = RATsrv(connect=False)
        return app
| bsd-3-clause |
villalonreina/dipy | dipy/align/tests/test_streamlinear.py | 9 | 15316 | import numpy as np
from numpy.testing import (run_module_suite,
assert_,
assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
assert_raises)
from dipy.align.streamlinear import (compose_matrix44,
decompose_matrix44,
BundleSumDistanceMatrixMetric,
BundleMinDistanceMatrixMetric,
BundleMinDistanceMetric,
StreamlineLinearRegistration,
StreamlineDistanceMetric)
from dipy.tracking.streamline import (center_streamlines,
unlist_streamlines,
relist_streamlines,
transform_streamlines,
set_number_of_points)
from dipy.core.geometry import compose_matrix
from dipy.data import get_data, two_cingulum_bundles
from nibabel import trackvis as tv
from dipy.align.bundlemin import (_bundle_minimum_distance_matrix,
_bundle_minimum_distance,
distance_matrix_mdf)
def simulated_bundle(no_streamlines=10, waves=False, no_pts=12):
    """Build a synthetic bundle of parallel lines (or cosine waves when
    ``waves`` is True), each resampled to ``no_pts`` points."""
    t = np.linspace(-10, 10, 200)
    offsets = np.linspace(-5, 5, no_streamlines)

    bundle = []
    for off in offsets:
        # Streamlines differ only by their constant z-offset.
        x = np.cos(t) if waves else np.zeros(t.shape)
        pts = np.vstack((x, t, off * np.ones(t.shape))).T
        bundle.append(set_number_of_points(pts, no_pts))

    return bundle
def fornix_streamlines(no_pts=12):
    """Load the bundled fornix trackvis dataset and resample every
    streamline to ``no_pts`` points."""
    fname = get_data('fornix')
    streams, hdr = tv.read(fname)
    # Each trackvis entry is a tuple whose first element is the point array.
    streamlines = [set_number_of_points(i[0], no_pts) for i in streams]
    return streamlines
def evaluate_convergence(bundle, new_bundle2):
    """Assert that two bundles coincide point-wise to 3 decimals."""
    static_pts = np.concatenate(bundle, axis=0)
    moved_pts = np.concatenate(new_bundle2, axis=0)
    assert_array_almost_equal(static_pts, moved_pts, 3)
def test_rigid_parallel_lines():
    # A rigid transform applied to a centered synthetic bundle must be fully
    # recoverable, so L-BFGS-B registration should converge back onto the
    # static bundle.
    bundle_initial = simulated_bundle()
    bundle, shift = center_streamlines(bundle_initial)
    # presumably (tx, ty, tz, rx, ry, rz) with angles in degrees, matching
    # the decompose_matrix44 convention -- TODO confirm against dipy docs
    mat = compose_matrix44([20, 0, 10, 0, 40, 0])

    bundle2 = transform_streamlines(bundle, mat)
    bundle_sum_distance = BundleSumDistanceMatrixMetric()
    options = {'maxcor': 100, 'ftol': 1e-9, 'gtol': 1e-16, 'eps': 1e-3}
    srr = StreamlineLinearRegistration(metric=bundle_sum_distance,
                                       x0=np.zeros(6),
                                       method='L-BFGS-B',
                                       bounds=None,
                                       options=options)

    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
    evaluate_convergence(bundle, new_bundle2)
def test_rigid_real_bundles():
    # Registration of a real (fornix) bundle against a rigidly transformed
    # copy of itself should converge with both distance metrics.
    bundle_initial = fornix_streamlines()[:20]
    bundle, shift = center_streamlines(bundle_initial)
    mat = compose_matrix44([0, 0, 20, 45., 0, 0])
    bundle2 = transform_streamlines(bundle, mat)

    bundle_sum_distance = BundleSumDistanceMatrixMetric()
    srr = StreamlineLinearRegistration(bundle_sum_distance,
                                       x0=np.zeros(6),
                                       method='Powell')
    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)

    evaluate_convergence(bundle, new_bundle2)

    # Repeat with the min-distance metric.
    bundle_min_distance = BundleMinDistanceMatrixMetric()
    srr = StreamlineLinearRegistration(bundle_min_distance,
                                       x0=np.zeros(6),
                                       method='Powell')
    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)

    evaluate_convergence(bundle, new_bundle2)

    # An unknown optimizer name must be rejected.
    assert_raises(ValueError, StreamlineLinearRegistration, method='Whatever')
def test_rigid_partial_real_bundles():
    """Register two disjoint halves of the fornix bundle and require the
    voxelized overlap of the aligned result to exceed 40%."""
    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[20:40]
    static_center, shift = center_streamlines(static)
    moving_center, shift2 = center_streamlines(moving)

    print(shift2)
    mat = compose_matrix(translate=np.array([0, 0, 0.]),
                         angles=np.deg2rad([40, 0, 0.]))
    moved = transform_streamlines(moving_center, mat)

    srr = StreamlineLinearRegistration()

    srm = srr.optimize(static_center, moved)
    print(srm.fopt)
    print(srm.iterations)
    print(srm.funcs)

    moving_back = srm.transform(moved)
    print(srm.matrix)

    static_center = set_number_of_points(static_center, 100)
    moving_center = set_number_of_points(moving_back, 100)

    vol = np.zeros((100, 100, 100))
    spts = np.concatenate(static_center, axis=0)
    # The builtin int keeps the same casting behavior as the old np.int
    # alias, which was removed in NumPy 1.24.
    spts = np.round(spts).astype(int) + np.array([50, 50, 50])

    mpts = np.concatenate(moving_center, axis=0)
    mpts = np.round(mpts).astype(int) + np.array([50, 50, 50])

    # Rasterize both bundles into 100^3 occupancy volumes centered at 50.
    for index in spts:
        i, j, k = index
        vol[i, j, k] = 1

    vol2 = np.zeros((100, 100, 100))

    for index in mpts:
        i, j, k = index
        vol2[i, j, k] = 1

    overlap = np.sum(np.logical_and(vol, vol2)) / float(np.sum(vol2))

    assert_equal(overlap * 100 > 40, True)
def test_stream_rigid():
    """transform() and matrix application must give the same moved bundle."""
    static_bundle = fornix_streamlines()[:20]
    moving_bundle = fornix_streamlines()[20:40]
    static_center, _ = center_streamlines(static_bundle)
    moving_bundle = transform_streamlines(moving_bundle,
                                          compose_matrix44([0, 0, 0, 0, 40, 0]))
    quiet_slr = StreamlineLinearRegistration()
    quiet_map = quiet_slr.optimize(static_bundle, moving_bundle)
    moved = transform_streamlines(moving_bundle, quiet_map.matrix)
    verbose_slr = StreamlineLinearRegistration(verbose=True)
    verbose_map = verbose_slr.optimize(static_bundle, moving_bundle)
    moved2 = transform_streamlines(moving_bundle, verbose_map.matrix)
    moved3 = verbose_map.transform(moving_bundle)
    assert_array_almost_equal(moved[0], moved2[0], decimal=3)
    assert_array_almost_equal(moved2[0], moved3[0], decimal=3)
def test_min_vs_min_fast_precision():
    """Matrix-based and fast BMD metrics must agree exactly in float64."""
    static = [s.astype('f8') for s in fornix_streamlines()[:20]]
    moving = [m.astype('f8') for m in fornix_streamlines()[:20]]
    matrix_metric = BundleMinDistanceMatrixMetric()
    matrix_metric.setup(static, moving)
    fast_metric = BundleMinDistanceMetric()
    fast_metric.setup(static, moving)
    probe = [0.01, 0, 0, 0, 0, 0]
    print(matrix_metric.distance(probe))
    print(fast_metric.distance(probe))
    assert_equal(matrix_metric.distance(probe), fast_metric.distance(probe))
def test_same_number_of_points():
    """optimize must reject bundles whose streamlines differ in point count."""
    ragged_small = [np.random.rand(10, 3), np.random.rand(20, 3)]
    ragged_large = [np.random.rand(21, 3), np.random.rand(30, 3)]
    uniform_10 = [np.random.rand(10, 3), np.random.rand(10, 3)]
    uniform_20 = [np.random.rand(20, 3), np.random.rand(20, 3)]
    slr = StreamlineLinearRegistration()
    assert_raises(ValueError, slr.optimize, ragged_small, ragged_large)
    assert_raises(ValueError, slr.optimize, uniform_10, uniform_20)
    assert_raises(ValueError, slr.optimize, uniform_10, ragged_large)
def test_efficient_bmd():
    """Cross-check the Cython BMD helpers against distance_matrix_mdf."""
    # Three parallel 3-point streamlines, each offset by 2 from the previous.
    a = np.array([[1, 1, 1],
                  [2, 2, 2],
                  [3, 3, 3]])
    streamlines = [a, a + 2, a + 4]
    points, offsets = unlist_streamlines(streamlines)
    points = points.astype(np.double)
    points2 = points.copy()
    # D is an output buffer filled in-place by the Cython routine; it is
    # deliberately reused (overwritten) for the second call below.
    D = np.zeros((len(offsets), len(offsets)), dtype='f8')
    _bundle_minimum_distance_matrix(points, points2,
                                    len(offsets), len(offsets),
                                    a.shape[0], D)
    # Identical bundles: the diagonal of the distance matrix must be zero.
    assert_equal(np.sum(np.diag(D)), 0)
    points2 += 2
    _bundle_minimum_distance_matrix(points, points2,
                                    len(offsets), len(offsets),
                                    a.shape[0], D)
    streamlines2 = relist_streamlines(points2, offsets)
    # Reference implementation: pure pairwise MDF distance matrix.
    D2 = distance_matrix_mdf(streamlines, streamlines2)
    assert_array_almost_equal(D, D2)
    cols = D2.shape[1]
    rows = D2.shape[0]
    # Scalar BMD value derived from the matrix must match the direct
    # scalar Cython implementation.
    dist = 0.25 * (np.sum(np.min(D2, axis=0)) / float(cols) +
                   np.sum(np.min(D2, axis=1)) / float(rows)) ** 2
    dist2 = _bundle_minimum_distance(points, points2,
                                     len(offsets), len(offsets),
                                     a.shape[0])
    assert_almost_equal(dist, dist2)
def test_openmp_locks():
    """OpenMP-parallel BMD must match the value computed from the matrix."""
    static = []
    moving = []
    pts = 20
    for _ in range(1000):
        base = np.random.rand(pts, 3)
        static.append(base)
        moving.append(base + 2)
    # Make the two bundles different sizes to exercise the rectangular case.
    moving = moving[2:]
    points, offsets = unlist_streamlines(static)
    points2, offsets2 = unlist_streamlines(moving)
    D = np.zeros((len(offsets), len(offsets2)), dtype='f8')
    _bundle_minimum_distance_matrix(points, points2,
                                    len(offsets), len(offsets2),
                                    pts, D)
    expected = 0.25 * (np.sum(np.min(D, axis=0)) / float(D.shape[1]) +
                       np.sum(np.min(D, axis=1)) / float(D.shape[0])) ** 2
    actual = _bundle_minimum_distance(points, points2,
                                      len(offsets), len(offsets2),
                                      pts)
    assert_almost_equal(expected, actual, 6)
def test_from_to_rigid():
    """compose_matrix44 and decompose_matrix44 must round-trip rigid params."""
    params = np.array([10, 2, 3, 0.1, 20., 30.])
    recovered = decompose_matrix44(compose_matrix44(params), 6)
    assert_array_almost_equal(params, recovered)
    # A pure x-axis reflection decomposes as a 180-degree rotation with
    # all signs flipped.
    expected = np.array([0, 0, 0, 180, 0., 0.])
    reflection = np.eye(4)
    reflection[0, 0] = -1
    assert_array_almost_equal(-expected, decompose_matrix44(reflection, 6))
def test_matrix44():
    """compose_matrix44 must reject parameter vectors of unsupported length."""
    for bad_size in (5, 9, 16):
        assert_raises(ValueError, compose_matrix44, np.ones(bad_size))
def test_abstract_metric_class():
    """Instantiating an incomplete StreamlineDistanceMetric must raise."""
    class IncompleteMetric(StreamlineDistanceMetric):
        # Does not implement the abstract interface, so construction fails.
        def test(self):
            pass
    assert_raises(TypeError, IncompleteMetric)
def test_evolution_of_previous_iterations():
    """With evolution=True, one matrix must be recorded per iteration."""
    from dipy.core.optimize import SCIPY_LESS_0_12
    static = fornix_streamlines()[:20]
    moving = [s + np.array([10., 0., 0.]) for s in fornix_streamlines()[:20]]
    slr = StreamlineLinearRegistration(evolution=True)
    # History tracking needs a modern-enough SciPy; otherwise skip silently.
    if SCIPY_LESS_0_12:
        return
    slm = slr.optimize(static, moving)
    assert_equal(len(slm.matrix_history), slm.iterations)
def test_similarity_real_bundles():
    """Recover a 7-dof similarity transform (rigid + scale) with Powell."""
    bundle_all, _ = center_streamlines(fornix_streamlines())
    bundle = bundle_all[:20]
    gold_params = [0, 0, 10, 0, 0, 0, 1.5]
    moved = transform_streamlines(bundle_all[:20],
                                  compose_matrix44(gold_params))
    slr = StreamlineLinearRegistration(metric=BundleMinDistanceMatrixMetric(),
                                       x0=np.array([0, 0, 0, 0, 0, 0, 1],
                                                   'f8'),
                                       method='Powell',
                                       bounds=None,
                                       verbose=False)
    recovered = slr.optimize(bundle, moved).transform(moved)
    evaluate_convergence(bundle, recovered)
def test_affine_real_bundles():
    """Recover a 12-dof affine in two stages: bounded L-BFGS-B then Powell."""
    bundle_all, _ = center_streamlines(fornix_streamlines())
    bundle = bundle_all[:20]
    gold_params = [0, 4, 2, 0, 10, 10, 1.2, 1.1, 1., 0., 0.2, 0.]
    moved = transform_streamlines(bundle_all[:20],
                                  compose_matrix44(gold_params))
    x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1., 0, 0, 0])
    # Translation/rotation bounded to +/-25, scales to [0.1, 1.5],
    # shears to [-1, 1].
    lim = 25
    bounds = ([(-lim, lim)] * 6 + [(0.1, 1.5)] * 3 + [(-1, 1)] * 3)
    lbfgs_options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5, 'eps': 1e-8}
    metric = BundleMinDistanceMatrixMetric()
    stage1 = StreamlineLinearRegistration(metric=metric,
                                          x0=x0,
                                          method='L-BFGS-B',
                                          bounds=bounds,
                                          verbose=True,
                                          options=lbfgs_options)
    moved = stage1.optimize(bundle, moved).transform(moved)
    # Refine the bounded solution with an unbounded Powell pass.
    stage2 = StreamlineLinearRegistration(metric=metric,
                                          x0=x0,
                                          method='Powell',
                                          bounds=None,
                                          verbose=True,
                                          options=None)
    moved = stage2.optimize(bundle, moved).transform(moved)
    evaluate_convergence(bundle, moved)
def test_vectorize_streamlines():
    """Resampling a bundle must give every streamline exactly 10 points."""
    cingulum = two_cingulum_bundles()[0]
    resampled = set_number_of_points(cingulum, 10)
    lengths = np.array([line.shape[0] for line in resampled])
    assert_equal(np.all(lengths == 10), True)
def test_x0_input():
    """Validate every accepted and rejected form of the x0 parameter."""
    accepted = [6, 7, 12, "Rigid", 'rigid', "similarity", "Affine",
                np.random.rand(6), np.random.rand(7), np.random.rand(12)]
    for good in accepted:
        StreamlineLinearRegistration(x0=good)
    rejected = [8, 20, "Whatever", np.random.rand(20), np.random.rand(20, 3),
                np.random.rand(4, 3)]
    for bad in rejected:
        assert_raises(ValueError, StreamlineLinearRegistration, x0=bad)
    # Integer and string specs must expand to the canonical start vectors.
    canonical = {6: np.zeros(6),
                 7: np.array([0, 0, 0, 0, 0, 0, 1.]),
                 12: np.array([0, 0, 0, 0, 0, 0, 1., 1., 1., 0, 0, 0])}
    for spec, size in [(6, 6), (7, 7), (12, 12),
                       ("Rigid", 6), ("similarity", 7), ("Affine", 12)]:
        slr = StreamlineLinearRegistration(x0=spec)
        assert_equal(slr.x0, canonical[size])
def test_compose_decompose_matrix44():
    """Round-trip random 6/7/12-dof vectors through (de)compose_matrix44."""
    for _ in range(20):
        params = np.random.rand(12)
        for size in (6, 7, 12):
            mat = compose_matrix44(params[:size])
            assert_array_almost_equal(params[:size],
                                      decompose_matrix44(mat, size=size))
        # Unsupported decomposition size must be rejected.
        assert_raises(ValueError, decompose_matrix44, mat, 20)
def test_cascade_of_optimizations_and_threading():
    """Rigid -> similarity -> affine cascade, exercising num_threads values."""
    bundles = two_cingulum_bundles()
    cb_static = set_number_of_points(bundles[0], 20)
    gold_x0 = np.array([10, 4, 3, 0, 20, 10, 1.5, 1.5, 1.5, 0., 0.2, 0])
    cb_moving = transform_streamlines(bundles[0], compose_matrix44(gold_x0))
    cb_moving = set_number_of_points(cb_moving, 20)
    print('first rigid')
    rigid = StreamlineLinearRegistration(x0=6, num_threads=1)
    rigid_map = rigid.optimize(cb_static, cb_moving)
    print('then similarity')
    similarity = StreamlineLinearRegistration(x0=7, num_threads=2)
    similarity_map = similarity.optimize(cb_static, cb_moving,
                                         rigid_map.matrix)
    print('then affine')
    affine = StreamlineLinearRegistration(x0=12, options={'maxiter': 50},
                                          num_threads=None)
    affine_map = affine.optimize(cb_static, cb_moving, similarity_map.matrix)
    # Each stage must improve on the previous one's objective value.
    assert_(similarity_map.fopt < rigid_map.fopt)
    assert_(affine_map.fopt < similarity_map.fopt)
# Run this test module directly (nose-style) when executed as a script.
if __name__ == '__main__':
    run_module_suite()
| bsd-3-clause |
openiitbombayx/edx-platform | openedx/core/djangoapps/course_groups/management/commands/tests/test_remove_users_from_multiple_cohorts.py | 91 | 3951 | """
Tests for cleanup of users which are added in multiple cohorts of a course
"""
from django.core.exceptions import MultipleObjectsReturned
from django.core.management import call_command
from django.test.client import RequestFactory
from openedx.core.djangoapps.course_groups.views import cohort_handler
from openedx.core.djangoapps.course_groups.cohorts import get_cohort, get_cohort_by_name
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestMultipleCohortUsers(ModuleStoreTestCase):
    """
    Tests for cleanup of users that were added to multiple cohorts of a course.
    """
    def setUp(self):
        """
        Set up two courses, two staff users and a request for the tests.
        """
        super(TestMultipleCohortUsers, self).setUp()
        self.course1 = CourseFactory.create()
        self.course2 = CourseFactory.create()
        self.user1 = UserFactory(is_staff=True)
        self.user2 = UserFactory(is_staff=True)
        self.request = RequestFactory().get("dummy_url")
        self.request.user = self.user1
    def test_users_with_multiple_cohorts_cleanup(self):
        """
        Test that users who were added to multiple cohorts of a course can
        get their cohorts without error after the cleanup command has run.
        """
        # set two auto_cohort_groups for both courses
        config_course_cohorts(
            self.course1, is_cohorted=True, auto_cohorts=["Course1AutoGroup1", "Course1AutoGroup2"]
        )
        config_course_cohorts(
            self.course2, is_cohorted=True, auto_cohorts=["Course2AutoGroup1", "Course2AutoGroup2"]
        )
        # get the cohorts from the courses, which will cause auto cohorts to be created
        cohort_handler(self.request, unicode(self.course1.id))
        cohort_handler(self.request, unicode(self.course2.id))
        course_1_auto_cohort_1 = get_cohort_by_name(self.course1.id, "Course1AutoGroup1")
        course_1_auto_cohort_2 = get_cohort_by_name(self.course1.id, "Course1AutoGroup2")
        course_2_auto_cohort_1 = get_cohort_by_name(self.course2.id, "Course2AutoGroup1")
        # forcefully add user1 in two auto cohorts
        course_1_auto_cohort_1.users.add(self.user1)
        course_1_auto_cohort_2.users.add(self.user1)
        # forcefully add user2 in auto cohorts of both courses
        course_1_auto_cohort_1.users.add(self.user2)
        course_2_auto_cohort_1.users.add(self.user2)
        # now check that when user1 goes on discussion page and tries to get
        # cohorts 'MultipleObjectsReturned' exception is returned
        with self.assertRaises(MultipleObjectsReturned):
            get_cohort(self.user1, self.course1.id)
        # also check that user 2 can go on discussion page of both courses
        # without any exception (one cohort per course is valid)
        get_cohort(self.user2, self.course1.id)
        get_cohort(self.user2, self.course2.id)
        # call command to remove users added in multiple cohorts of a course
        # are removed from all cohort groups
        call_command('remove_users_from_multiple_cohorts')
        # check that only user1 (with multiple cohorts) is removed from cohorts
        # and user2 is still in auto cohorts of both course after running
        # 'remove_users_from_multiple_cohorts' management command
        self.assertEqual(self.user1.course_groups.count(), 0)
        self.assertEqual(self.user2.course_groups.count(), 2)
        user2_cohorts = list(self.user2.course_groups.values_list('name', flat=True))
        self.assertEqual(user2_cohorts, ['Course1AutoGroup1', 'Course2AutoGroup1'])
        # now check that user1 can get cohorts in which he is added
        response = cohort_handler(self.request, unicode(self.course1.id))
        self.assertEqual(response.status_code, 200)
| agpl-3.0 |
sticksnleaves/ghost-blog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/__init__.py | 269 | 2974 | # -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.6'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
    """
    Lex ``code`` with ``lexer`` and return an iterable of tokens.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError, err:
        # Common user mistake: passing the lexer *class* instead of an
        # instance makes get_tokens an unbound method on Python 2; re-raise
        # with a clearer message in that specific case only.
        if isinstance(err.args[0], str) and \
           'unbound method get_tokens' in err.args[0]:
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
def format(tokens, formatter, outfile=None):
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.
    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    try:
        if not outfile:
            # Choose a bytes or text buffer depending on whether the
            # formatter encodes its output.
            #print formatter, 'using', formatter.encoding
            realoutfile = formatter.encoding and BytesIO() or StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError, err:
        # Same class-vs-instance mistake as in lex(); see above.
        if isinstance(err.args[0], str) and \
           'unbound method format' in err.args[0]:
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    tokens = lex(code, lexer)
    return format(tokens, formatter, outfile)
# Command-line entry point: delegate to the pygmentize CLI driver.
if __name__ == '__main__':
    from pygments.cmdline import main
    sys.exit(main(sys.argv))
| mit |
fighterlyt/bite-project | deps/gdata-python-client/samples/contacts/profiles_example.py | 42 | 8364 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a Sample for Google Profiles.
ProfilesSample: demonstrates operations with the Profiles feed.
"""
__author__ = 'jtoledo (Julian Toledo)'
import getopt
import getpass
import sys
import gdata.contacts
import gdata.contacts.service
class ProfilesSample(object):
  """ProfilesSample object demonstrates operations with the Profiles feed."""
  def __init__(self, email, password, domain):
    """Constructor for the ProfilesSample object.
    Takes an email and password corresponding to a gmail account to
    demonstrate the functionality of the Profiles feed.
    Args:
      email: [string] The e-mail address of the account to use for the sample.
      password: [string] The password corresponding to the account specified by
          the email parameter.
      domain: [string] The domain for the Profiles feed
    """
    self.gd_client = gdata.contacts.service.ContactsService(
        contact_list=domain)
    self.gd_client.email = email
    self.gd_client.password = password
    self.gd_client.source = 'GoogleInc-ProfilesPythonSample-1'
    self.gd_client.ProgrammaticLogin()
  def PrintFeed(self, feed, ctr=0):
    """Prints out the contents of a feed to the console.
    Args:
      feed: A gdata.profiles.ProfilesFeed instance.
      ctr: [int] The number of entries in this feed previously printed. This
          allows continuous entry numbers when paging through a feed.
    Returns:
      The number of entries printed, including those previously printed as
      specified in ctr. This is for passing as an argument to ctr on
      successive calls to this method.
    """
    if not feed.entry:
      print '\nNo entries in feed.\n'
      return 0
    for entry in feed.entry:
      self.PrintEntry(entry)
    return len(feed.entry) + ctr
  def PrintEntry(self, entry):
    """Prints out the contents of a single Entry to the console.
    Args:
      entry: A gdata.contacts.ProfilesEntry
    """
    print '\n%s' % (entry.title.text)
    for email in entry.email:
      if email.primary == 'true':
        print 'Email: %s (primary)' % (email.address)
      else:
        print 'Email: %s' % (email.address)
    # Each of the following fields is optional; print only what is present.
    if entry.nickname:
      print 'Nickname: %s' % (entry.nickname.text)
    if entry.occupation:
      print 'Occupation: %s' % (entry.occupation.text)
    if entry.gender:
      print 'Gender: %s' % (entry.gender.value)
    if entry.birthday:
      print 'Birthday: %s' % (entry.birthday.when)
    for relation in entry.relation:
      print 'Relation: %s %s' % (relation.rel, relation.text)
    for user_defined_field in entry.user_defined_field:
      print 'UserDefinedField: %s %s' % (user_defined_field.key,
                                         user_defined_field.value)
    for website in entry.website:
      print 'Website: %s %s' % (website.href, website.rel)
    for phone_number in entry.phone_number:
      print 'Phone Number: %s' % phone_number.text
    for organization in entry.organization:
      print 'Organization:'
      if organization.org_name:
        print '  Name: %s' % (organization.org_name.text)
      if organization.org_title:
        print '  Title: %s' % (organization.org_title.text)
      if organization.org_department:
        print '  Department: %s' % (organization.org_department.text)
      if organization.org_job_description:
        print '  Job Desc: %s' % (organization.org_job_description.text)
  def PrintPaginatedFeed(self, feed, print_method):
    """Print all pages of a paginated feed.
    This will iterate through a paginated feed, requesting each page and
    printing the entries contained therein.
    Args:
      feed: A gdata.contacts.ProfilesFeed instance.
      print_method: The method which will be used to print each page of the
          feed.
    """
    ctr = 0
    while feed:
      # Print contents of current feed
      ctr = print_method(feed=feed, ctr=ctr)
      # Prepare for next feed iteration
      next = feed.GetNextLink()
      feed = None
      if next:
        if self.PromptOperationShouldContinue():
          # Another feed is available, and the user has given us permission
          # to fetch it
          feed = self.gd_client.GetProfilesFeed(next.href)
        else:
          # User has asked us to terminate
          feed = None
  def PromptOperationShouldContinue(self):
    """Display a "Continue" prompt.
    This is used to give users a chance to break out of a loop, just in
    case they have too many profiles/groups.
    Returns:
      A boolean value, True if the current operation should continue, False if
      the current operation should terminate.
    """
    while True:
      # NOTE(review): comparing strings with ``is`` relies on CPython's
      # interning of short strings; ``==`` would be the correct operator.
      key_input = raw_input('Continue [Y/n]? ')
      if key_input is 'N' or key_input is 'n':
        return False
      elif key_input is 'Y' or key_input is 'y' or key_input is '':
        return True
  def ListAllProfiles(self):
    """Retrieves a list of profiles and displays name and primary email."""
    feed = self.gd_client.GetProfilesFeed()
    self.PrintPaginatedFeed(feed, self.PrintFeed)
  def SelectProfile(self):
    """Prompts for a username and prints the matching profile, if any."""
    username = raw_input('Please enter your username for the profile: ')
    entry_uri = self.gd_client.GetFeedUri('profiles')+'/'+username
    try:
      entry = self.gd_client.GetProfile(entry_uri)
      self.PrintEntry(entry)
    except gdata.service.RequestError:
      print 'Invalid username for the profile.'
  def PrintMenu(self):
    """Displays a menu of options for the user to choose from."""
    print ('\nProfiles Sample\n'
           '1) List all of your Profiles.\n'
           '2) Get a single Profile.\n'
           '3) Exit.\n')
  def GetMenuChoice(self, maximum):
    """Retrieves the menu selection from the user.
    Args:
      maximum: [int] The maximum number of allowed choices (inclusive)
    Returns:
      The integer of the menu item chosen by the user.
    """
    while True:
      key_input = raw_input('> ')
      try:
        num = int(key_input)
      except ValueError:
        print 'Invalid choice. Please choose a value between 1 and', maximum
        continue
      if num > maximum or num < 1:
        print 'Invalid choice. Please choose a value between 1 and', maximum
      else:
        return num
  def Run(self):
    """Prompts the user to choose functionality to be demonstrated."""
    try:
      while True:
        self.PrintMenu()
        choice = self.GetMenuChoice(3)
        if choice == 1:
          self.ListAllProfiles()
        elif choice == 2:
          self.SelectProfile()
        elif choice == 3:
          return
    except KeyboardInterrupt:
      print '\nGoodbye.'
      return
def main():
  """Demonstrates use of the Profiles using the ProfilesSample object."""
  # Parse command line options
  try:
    opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw=', 'domain='])
  except getopt.error, msg:
    print 'python profiles_example.py --user [username] --pw [password]'
    print ' --domain [domain]'
    sys.exit(2)
  user = ''
  pw = ''
  domain = ''
  # Process options
  for option, arg in opts:
    if option == '--user':
      user = arg
    elif option == '--pw':
      pw = arg
    elif option == '--domain':
      domain = arg
  # Interactively prompt for any credential not supplied on the command line.
  while not user:
    print 'NOTE: Please run these tests only with a test account.'
    user = raw_input('Please enter your email: ')
  while not pw:
    pw = getpass.getpass('Please enter password: ')
    if not pw:
      print 'Password cannot be blank.'
  while not domain:
    domain = raw_input('Please enter your Apps domain: ')
  try:
    sample = ProfilesSample(user, pw, domain)
  except gdata.service.BadAuthentication:
    print 'Invalid user credentials given.'
    return
  sample.Run()
# Script entry point.
if __name__ == '__main__':
  main()
| apache-2.0 |
devanshdalal/scikit-learn | sklearn/metrics/scorer.py | 33 | 17925 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, mean_squared_log_error, accuracy_score,
f1_score, roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from .cluster import homogeneity_score
from .cluster import completeness_score
from .cluster import v_measure_score
from .cluster import mutual_info_score
from .cluster import adjusted_mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
    """Shared machinery for all scorer objects.

    Holds the wrapped metric, its keyword arguments and a sign that is
    +1 for scores (greater is better) and -1 for losses.
    """
    def __init__(self, score_func, sign, kwargs):
        self._kwargs = kwargs
        self._score_func = score_func
        self._sign = sign
        # XXX After removing the deprecated scorers (v0.20) remove the
        # XXX deprecation_msg property again and remove __call__'s body again
        self._deprecation_msg = None

    @abstractmethod
    def __call__(self, estimator, X, y, sample_weight=None):
        # Subclasses invoke this first so deprecated scorer names warn.
        if self._deprecation_msg is None:
            return
        warnings.warn(self._deprecation_msg,
                      category=DeprecationWarning,
                      stacklevel=2)

    def __repr__(self):
        extra = "".join(", %s=%s" % (str(name), str(value))
                        for name, value in self._kwargs.items())
        sign_part = "" if self._sign > 0 else ", greater_is_better=False"
        return ("make_scorer(%s%s%s%s)"
                % (self._score_func.__name__, sign_part,
                   self._factory_args(), extra))

    def _factory_args(self):
        """Return non-default make_scorer arguments for repr."""
        return ""
class _PredictScorer(_BaseScorer):
    """Scorer that evaluates plain ``estimator.predict`` output."""
    def __call__(self, estimator, X, y_true, sample_weight=None):
        """Score ``estimator.predict(X)`` against ``y_true``.

        Parameters
        ----------
        estimator : object
            Trained estimator with a ``predict`` method.
        X : array-like or sparse matrix
            Test data fed to ``estimator.predict``.
        y_true : array-like
            Ground-truth target values for X.
        sample_weight : array-like, optional (default=None)
            Sample weights forwarded to the metric.

        Returns
        -------
        score : float
            Signed score of the prediction.
        """
        super(_PredictScorer, self).__call__(estimator, X, y_true,
                                             sample_weight=sample_weight)
        predictions = estimator.predict(X)
        if sample_weight is None:
            return self._sign * self._score_func(y_true, predictions,
                                                 **self._kwargs)
        return self._sign * self._score_func(y_true, predictions,
                                             sample_weight=sample_weight,
                                             **self._kwargs)
class _ProbaScorer(_BaseScorer):
    """Scorer that feeds ``predict_proba`` output to the metric."""
    def __call__(self, clf, X, y, sample_weight=None):
        """Score ``clf.predict_proba(X)`` against class labels ``y``.

        Parameters
        ----------
        clf : object
            Trained classifier with a ``predict_proba`` method.
        X : array-like or sparse matrix
            Test data fed to ``clf.predict_proba``.
        y : array-like
            Ground-truth class labels (not probabilities) for X.
        sample_weight : array-like, optional (default=None)
            Sample weights forwarded to the metric.

        Returns
        -------
        score : float
            Signed score of the predicted probabilities.
        """
        super(_ProbaScorer, self).__call__(clf, X, y,
                                           sample_weight=sample_weight)
        probabilities = clf.predict_proba(X)
        if sample_weight is None:
            return self._sign * self._score_func(y, probabilities,
                                                 **self._kwargs)
        return self._sign * self._score_func(y, probabilities,
                                             sample_weight=sample_weight,
                                             **self._kwargs)

    def _factory_args(self):
        return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
    """Scorer for metrics that need continuous decision values.

    Uses ``decision_function`` when available, falling back to
    ``predict_proba`` (or ``predict`` for regressors).
    """
    def __call__(self, clf, X, y, sample_weight=None):
        """Evaluate decision function output for X relative to y.

        Parameters
        ----------
        clf : object
            Trained classifier with either a ``decision_function`` or a
            ``predict_proba`` method (regressors use ``predict``).
        X : array-like or sparse matrix
            Test data.
        y : array-like
            Ground-truth class labels (not decision values) for X.
        sample_weight : array-like, optional (default=None)
            Sample weights forwarded to the metric.

        Returns
        -------
        score : float
            Signed score of the decision values.
        """
        super(_ThresholdScorer, self).__call__(clf, X, y,
                                               sample_weight=sample_weight)
        # Threshold metrics are only defined for these target types.
        y_type = type_of_target(y)
        if y_type not in ("binary", "multilabel-indicator"):
            raise ValueError("{0} format is not supported".format(y_type))
        if is_regressor(clf):
            y_pred = clf.predict(X)
        else:
            try:
                y_pred = clf.decision_function(X)
                # For multi-output multi-class estimator
                if isinstance(y_pred, list):
                    # BUGFIX: np.vstack requires a sequence of arrays;
                    # passing a bare generator is deprecated in NumPy and
                    # raises in recent versions.
                    y_pred = np.vstack([p for p in y_pred]).T
            except (NotImplementedError, AttributeError):
                y_pred = clf.predict_proba(X)
                if y_type == "binary":
                    # Keep only the probability of the positive class.
                    y_pred = y_pred[:, 1]
                elif isinstance(y_pred, list):
                    y_pred = np.vstack([p[:, -1] for p in y_pred]).T
        if sample_weight is not None:
            return self._sign * self._score_func(y, y_pred,
                                                 sample_weight=sample_weight,
                                                 **self._kwargs)
        else:
            return self._sign * self._score_func(y, y_pred, **self._kwargs)

    def _factory_args(self):
        return ", needs_threshold=True"
def get_scorer(scoring):
    """Resolve *scoring* to a scorer callable.

    Strings are looked up in the SCORERS registry; anything else is
    assumed to already be a scorer and returned unchanged.
    """
    if not isinstance(scoring, six.string_types):
        return scoring
    try:
        return SCORERS[scoring]
    except KeyError:
        valid_names = [name for name in SCORERS
                       if SCORERS[name]._deprecation_msg is None]
        raise ValueError('%r is not a valid scoring value. '
                         'Valid options are %s'
                         % (scoring, sorted(valid_names)))
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
    """Determine scorer from user options.

    A TypeError will be thrown if the estimator cannot be scored.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    allow_none : boolean, optional, default: False
        If no scoring is specified and the estimator has no score function, we
        can either return None or raise an exception.

    Returns
    -------
    scoring : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    """
    if not hasattr(estimator, 'fit'):
        raise TypeError("estimator should be an estimator implementing "
                        "'fit' method, %r was passed" % estimator)
    if isinstance(scoring, six.string_types):
        return get_scorer(scoring)
    if scoring is not None:
        # Heuristic to ensure user has not passed a metric function: metric
        # modules live under sklearn.metrics but outside scorer/tests.
        module = getattr(scoring, '__module__', None)
        if hasattr(module, 'startswith') and \
           module.startswith('sklearn.metrics.') and \
           not module.startswith('sklearn.metrics.scorer') and \
           not module.startswith('sklearn.metrics.tests.'):
            raise ValueError('scoring value %r looks like it is a metric '
                             'function rather than a scorer. A scorer should '
                             'require an estimator as its first parameter. '
                             'Please use `make_scorer` to convert a metric '
                             'to a scorer.' % scoring)
        return get_scorer(scoring)
    if hasattr(estimator, 'score'):
        return _passthrough_scorer
    if allow_none:
        return None
    raise TypeError(
        "If no scoring is specified, the estimator passed should "
        "have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
                needs_threshold=False, **kwargs):
    """Build a scorer callable around a performance metric or loss function.

    This factory function wraps scoring functions for use in GridSearchCV
    and cross_val_score. It takes a score function, such as
    ``accuracy_score``, ``mean_squared_error``, ``adjusted_rand_index`` or
    ``average_precision`` and returns a callable that scores an estimator's
    output.

    Read more in the :ref:`User Guide <scoring>`.

    Parameters
    ----------
    score_func : callable,
        Score function (or loss function) with signature
        ``score_func(y, y_pred, **kwargs)``.

    greater_is_better : boolean, default=True
        Whether score_func is a score function (default), meaning high is
        good, or a loss function, meaning low is good. In the latter case,
        the scorer object will sign-flip the outcome of the score_func.

    needs_proba : boolean, default=False
        Whether score_func requires predict_proba to get probability
        estimates out of a classifier.

    needs_threshold : boolean, default=False
        Whether score_func takes a continuous decision certainty.
        This only works for binary classification using estimators that
        have either a decision_function or predict_proba method.
        For example ``average_precision`` or the area under the roc curve
        can not be computed using discrete predictions alone.

    **kwargs : additional arguments
        Additional parameters to be passed to score_func.

    Returns
    -------
    scorer : callable
        Callable object that returns a scalar score; greater is better.

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score, make_scorer
    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
    >>> ftwo_scorer
    make_scorer(fbeta_score, beta=2)
    >>> from sklearn.model_selection import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
    ...                     scoring=ftwo_scorer)
    """
    if needs_proba and needs_threshold:
        raise ValueError("Set either needs_proba or needs_threshold to True,"
                         " but not both.")
    # Choose the scorer flavour from the prediction kind it requires.
    if needs_proba:
        scorer_cls = _ProbaScorer
    elif needs_threshold:
        scorer_cls = _ThresholdScorer
    else:
        scorer_cls = _PredictScorer
    # Losses are sign-flipped so that "greater is better" always holds for
    # the returned scorer.
    sign = 1 if greater_is_better else -1
    return scorer_cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
neg_mean_squared_error_scorer = make_scorer(mean_squared_error,
                                            greater_is_better=False)
# The un-prefixed *_error scorers below are deprecated aliases of the neg_*
# scorers; the attached _deprecation_msg is presumably surfaced as a warning
# by get_scorer — confirm against its implementation.
deprecation_msg = ('Scoring method mean_squared_error was renamed to '
                   'neg_mean_squared_error in version 0.18 and will '
                   'be removed in 0.20.')
mean_squared_error_scorer = make_scorer(mean_squared_error,
                                        greater_is_better=False)
mean_squared_error_scorer._deprecation_msg = deprecation_msg
neg_mean_squared_log_error_scorer = make_scorer(mean_squared_log_error,
                                                greater_is_better=False)
neg_mean_absolute_error_scorer = make_scorer(mean_absolute_error,
                                             greater_is_better=False)
deprecation_msg = ('Scoring method mean_absolute_error was renamed to '
                   'neg_mean_absolute_error in version 0.18 and will '
                   'be removed in 0.20.')
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
                                         greater_is_better=False)
mean_absolute_error_scorer._deprecation_msg = deprecation_msg
neg_median_absolute_error_scorer = make_scorer(median_absolute_error,
                                               greater_is_better=False)
deprecation_msg = ('Scoring method median_absolute_error was renamed to '
                   'neg_median_absolute_error in version 0.18 and will '
                   'be removed in 0.20.')
median_absolute_error_scorer = make_scorer(median_absolute_error,
                                           greater_is_better=False)
median_absolute_error_scorer._deprecation_msg = deprecation_msg
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
                             needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
                                       needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
neg_log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
                                  needs_proba=True)
deprecation_msg = ('Scoring method log_loss was renamed to '
                   'neg_log_loss in version 0.18 and will be removed in 0.20.')
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
                              needs_proba=True)
log_loss_scorer._deprecation_msg = deprecation_msg
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
homogeneity_scorer = make_scorer(homogeneity_score)
completeness_scorer = make_scorer(completeness_score)
v_measure_scorer = make_scorer(v_measure_score)
mutual_info_scorer = make_scorer(mutual_info_score)
adjusted_mutual_info_scorer = make_scorer(adjusted_mutual_info_score)
normalized_mutual_info_scorer = make_scorer(normalized_mutual_info_score)
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)
# Registry mapping scoring-string names (as accepted by check_scoring /
# get_scorer) to the scorer objects defined above.
SCORERS = dict(r2=r2_scorer,
               neg_median_absolute_error=neg_median_absolute_error_scorer,
               neg_mean_absolute_error=neg_mean_absolute_error_scorer,
               neg_mean_squared_error=neg_mean_squared_error_scorer,
               neg_mean_squared_log_error=neg_mean_squared_log_error_scorer,
               median_absolute_error=median_absolute_error_scorer,
               mean_absolute_error=mean_absolute_error_scorer,
               mean_squared_error=mean_squared_error_scorer,
               accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
               average_precision=average_precision_scorer,
               log_loss=log_loss_scorer,
               neg_log_loss=neg_log_loss_scorer,
               # Cluster metrics that use supervised evaluation
               adjusted_rand_score=adjusted_rand_scorer,
               homogeneity_score=homogeneity_scorer,
               completeness_score=completeness_scorer,
               v_measure_score=v_measure_scorer,
               mutual_info_score=mutual_info_scorer,
               adjusted_mutual_info_score=adjusted_mutual_info_scorer,
               normalized_mutual_info_score=normalized_mutual_info_scorer,
               fowlkes_mallows_score=fowlkes_mallows_scorer)
# Register the binary precision/recall/f1 scorers plus a macro/micro/
# samples/weighted averaged variant of each (e.g. 'f1_macro').
for name, metric in [('precision', precision_score),
                     ('recall', recall_score), ('f1', f1_score)]:
    SCORERS[name] = make_scorer(metric)
    for average in ['macro', 'micro', 'samples', 'weighted']:
        qualified_name = '{0}_{1}'.format(name, average)
        SCORERS[qualified_name] = make_scorer(metric, pos_label=None,
                                              average=average)
| bsd-3-clause |
krishnazure/Flask | Work/Trivia - Module 5/env/Lib/site-packages/flask/testsuite/reqctx.py | 557 | 5960 | # -*- coding: utf-8 -*-
"""
flask.testsuite.reqctx
~~~~~~~~~~~~~~~~~~~~~~
Tests the request context.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
try:
from greenlet import greenlet
except ImportError:
greenlet = None
from flask.testsuite import FlaskTestCase
class RequestContextTestCase(FlaskTestCase):
    """Tests for pushing, popping and copying Flask request contexts."""
    def test_teardown_on_pop(self):
        # teardown_request handlers must fire on ctx.pop(), not on push().
        buffer = []
        app = flask.Flask(__name__)
        @app.teardown_request
        def end_of_request(exception):
            buffer.append(exception)
        ctx = app.test_request_context()
        ctx.push()
        self.assert_equal(buffer, [])
        ctx.pop()
        # A clean pop passes exception=None to the teardown handler.
        self.assert_equal(buffer, [None])
    def test_proper_test_request_context(self):
        # SERVER_NAME drives both external URL building and host matching.
        app = flask.Flask(__name__)
        app.config.update(
            SERVER_NAME='localhost.localdomain:5000'
        )
        @app.route('/')
        def index():
            return None
        @app.route('/', subdomain='foo')
        def sub():
            return None
        with app.test_request_context('/'):
            self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
        with app.test_request_context('/'):
            self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
        # A mismatched HTTP_HOST must raise ValueError with this exact text.
        try:
            with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
                pass
        except Exception as e:
            self.assert_true(isinstance(e, ValueError))
            self.assert_equal(str(e), "the server name provided " +
                "('localhost.localdomain:5000') does not match the " + \
                "server name from the WSGI environment ('localhost')")
        # Matching server names (with or without explicit port) must not raise.
        try:
            app.config.update(SERVER_NAME='localhost')
            with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
                pass
        except ValueError as e:
            raise ValueError(
                "No ValueError exception should have been raised \"%s\"" % e
            )
        try:
            app.config.update(SERVER_NAME='localhost:80')
            with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
                pass
        except ValueError as e:
            raise ValueError(
                "No ValueError exception should have been raised \"%s\"" % e
            )
    def test_context_binding(self):
        # Inside a test_request_context, view functions can be called directly.
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return 'Hello %s!' % flask.request.args['name']
        @app.route('/meh')
        def meh():
            return flask.request.url
        with app.test_request_context('/?name=World'):
            self.assert_equal(index(), 'Hello World!')
        with app.test_request_context('/meh'):
            self.assert_equal(meh(), 'http://localhost/meh')
        # Leaving the with-blocks must pop the context stack back to empty.
        self.assert_true(flask._request_ctx_stack.top is None)
    def test_context_test(self):
        # flask.request and has_request_context() are falsy outside a context.
        app = flask.Flask(__name__)
        self.assert_false(flask.request)
        self.assert_false(flask.has_request_context())
        ctx = app.test_request_context()
        ctx.push()
        try:
            self.assert_true(flask.request)
            self.assert_true(flask.has_request_context())
        finally:
            ctx.pop()
    def test_manual_context_binding(self):
        # push()/pop() is the manual equivalent of the with-statement form.
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return 'Hello %s!' % flask.request.args['name']
        ctx = app.test_request_context('/?name=World')
        ctx.push()
        self.assert_equal(index(), 'Hello World!')
        ctx.pop()
        # After pop() the request proxy is unbound and must raise RuntimeError.
        try:
            index()
        except RuntimeError:
            pass
        else:
            self.assert_true(0, 'expected runtime error')
    def test_greenlet_context_copying(self):
        # A copied request context can be entered later from another greenlet,
        # after the original request has already finished.
        app = flask.Flask(__name__)
        greenlets = []
        @app.route('/')
        def index():
            reqctx = flask._request_ctx_stack.top.copy()
            def g():
                # Outside the copied context nothing is bound...
                self.assert_false(flask.request)
                self.assert_false(flask.current_app)
                with reqctx:
                    # ...inside it, the original request data is visible.
                    self.assert_true(flask.request)
                    self.assert_equal(flask.current_app, app)
                    self.assert_equal(flask.request.path, '/')
                    self.assert_equal(flask.request.args['foo'], 'bar')
                self.assert_false(flask.request)
                return 42
            greenlets.append(greenlet(g))
            return 'Hello World!'
        rv = app.test_client().get('/?foo=bar')
        self.assert_equal(rv.data, b'Hello World!')
        # The greenlet only runs after the request has completed.
        result = greenlets[0].run()
        self.assert_equal(result, 42)
    def test_greenlet_context_copying_api(self):
        # Same scenario via the public copy_current_request_context decorator.
        app = flask.Flask(__name__)
        greenlets = []
        @app.route('/')
        def index():
            # NOTE(review): reqctx is unused in this test; the decorator below
            # does the copying.
            reqctx = flask._request_ctx_stack.top.copy()
            @flask.copy_current_request_context
            def g():
                self.assert_true(flask.request)
                self.assert_equal(flask.current_app, app)
                self.assert_equal(flask.request.path, '/')
                self.assert_equal(flask.request.args['foo'], 'bar')
                return 42
            greenlets.append(greenlet(g))
            return 'Hello World!'
        rv = app.test_client().get('/?foo=bar')
        self.assert_equal(rv.data, b'Hello World!')
        result = greenlets[0].run()
        self.assert_equal(result, 42)
    # Disable test if we don't have greenlets available
    if greenlet is None:
        test_greenlet_context_copying = None
        test_greenlet_context_copying_api = None
def suite():
    """Collect the request-context tests into a unittest suite."""
    collected = unittest.TestSuite()
    collected.addTest(unittest.makeSuite(RequestContextTestCase))
    return collected
| apache-2.0 |
superdesk/Live-Blog | plugins/media-archive/superdesk/media_archive/impl/meta_data.py | 2 | 7300 | '''
Created on Apr 19, 2012
@package: superdesk media archive
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
SQL Alchemy based implementation for the meta data API.
'''
from ..api.meta_data import QMetaData
from ..core.impl.meta_service_base import MetaDataServiceBaseAlchemy
from ..core.spec import IMetaDataHandler, IMetaDataReferencer, IThumbnailManager
from ..meta.meta_data import MetaDataMapped
from ally.api.model import Content
from ally.cdm.spec import ICDM
from ally.container import wire, app
from ally.container.ioc import injected
from ally.container.support import setup
from ally.exception import InputError
from ally.internationalization import _
from ally.support.sqlalchemy.util_service import handle
from ally.support.util_sys import pythonPath
from datetime import datetime
from os.path import join, getsize, abspath
from sqlalchemy.exc import SQLAlchemyError
from superdesk.language.meta.language import LanguageEntity
from superdesk.media_archive.api.meta_data import IMetaDataUploadService
from superdesk.media_archive.core.impl.meta_service_base import metaTypeFor, \
thumbnailFormatFor
from superdesk.media_archive.core.impl.query_service_creator import \
ISearchProvider
from superdesk.media_archive.meta.meta_data import META_TYPE_KEY
from superdesk.media_archive.meta.meta_info import MetaInfoMapped
# --------------------------------------------------------------------
@injected
@setup(IMetaDataUploadService, name='metaDataService')
class MetaDataServiceAlchemy(MetaDataServiceBaseAlchemy, IMetaDataReferencer, IMetaDataUploadService):
    '''
    Implementation for @see: IMetaDataService, @see: IMetaDataUploadService , and also provides services
    as the @see: IMetaDataReferencer
    '''

    format_file_name = '%(id)s.%(name)s'; wire.config('format_file_name', doc='''
    The format for the files names in the media archive''')
    format_thumbnail = '%(size)s/other.jpg'; wire.config('format_thumbnail', doc='''
    The format for the unknown thumbnails in the media archive''')

    cdmArchive = ICDM; wire.entity('cdmArchive')
    thumbnailManager = IThumbnailManager; wire.entity('thumbnailManager')
    metaDataHandlers = list; wire.entity('metaDataHandlers')
    # The handlers list used by the meta data in order to get the references.
    searchProvider = ISearchProvider; wire.entity('searchProvider')
    # The search provider that will be used to manage all search related activities
    default_media_language = 'en'; wire.config('default_media_language')
    # Cached id of the default media language; resolved lazily on first insert.
    languageId = None

    def __init__(self):
        '''
        Construct the meta data service, validating the wired dependencies.
        '''
        assert isinstance(self.format_file_name, str), 'Invalid format file name %s' % self.format_file_name
        assert isinstance(self.format_thumbnail, str), 'Invalid format thumbnail %s' % self.format_thumbnail
        assert isinstance(self.cdmArchive, ICDM), 'Invalid archive CDM %s' % self.cdmArchive
        assert isinstance(self.thumbnailManager, IThumbnailManager), 'Invalid thumbnail manager %s' % self.thumbnailManager
        # BUG FIX: the failure message previously formatted self.referenceHandlers,
        # an attribute that does not exist, so a failing assert raised
        # AttributeError instead of AssertionError with a useful message.
        assert isinstance(self.metaDataHandlers, list), 'Invalid reference handlers %s' % self.metaDataHandlers
        assert isinstance(self.searchProvider, ISearchProvider), 'Invalid search provider %s' % self.searchProvider
        MetaDataServiceBaseAlchemy.__init__(self, MetaDataMapped, QMetaData, self, self.cdmArchive, self.thumbnailManager)
        self._thumbnailFormatId = self._metaTypeId = None

    # ----------------------------------------------------------------

    def insert(self, userId, content, scheme, thumbSize=None):
        '''
        @see: IMetaDataService.insert

        Persists the meta data, publishes the content in the CDM archive,
        lets the first capable handler extract meta info, and indexes the
        result with the search provider.
        '''
        assert isinstance(content, Content), 'Invalid content %s' % content
        if not content.name: raise InputError(_('No name specified for content'))
        if self.languageId is None:
            # Resolve and cache the id of the configured default language.
            self.languageId = self.session().query(LanguageEntity).filter(LanguageEntity.Code == self.default_media_language).one().Id

        metaData = MetaDataMapped()
        # TODO: check this
        # metaData.CreatedOn = current_timestamp()
        metaData.CreatedOn = datetime.now()
        metaData.Creator = userId
        metaData.Name = content.name
        metaData.typeId = self.metaTypeId()
        metaData.Type = META_TYPE_KEY
        metaData.thumbnailFormatId = self.thumbnailFormatId()

        try:
            # Flush first so metaData.Id is available for building the path.
            self.session().add(metaData)
            self.session().flush((metaData,))

            path = self.format_file_name % {'id': metaData.Id, 'name': metaData.Name}
            path = ''.join((META_TYPE_KEY, '/', self.generateIdPath(metaData.Id), '/', path))
            contentPath = self.cdmArchive.getURI(path, 'file')
            self.cdmArchive.publishContent(path, content)
            metaData.content = path
            metaData.SizeInBytes = getsize(contentPath)

            # First pass: handlers that can decide from the declared content
            # type; only if none claims the content (for-else) fall back to
            # content inspection.
            found = False
            for handler in self.metaDataHandlers:
                assert isinstance(handler, IMetaDataHandler), 'Invalid handler %s' % handler
                if handler.processByInfo(metaData, contentPath, content.type):
                    metaInfo = handler.addMetaInfo(metaData, self.languageId)
                    found = True
                    break
            else:
                for handler in self.metaDataHandlers:
                    if handler.process(metaData, contentPath):
                        metaInfo = handler.addMetaInfo(metaData, self.languageId)
                        found = True
                        break

            if found:
                self.session().merge(metaData)
                self.session().flush((metaData,))
            else:
                # No handler recognized the content: store a bare meta info.
                metaInfo = MetaInfoMapped()
                metaInfo.MetaData = metaData.Id
                metaInfo.Language = self.languageId
                self.session().add(metaInfo)
                self.session().flush((metaData, metaInfo,))

            self.searchProvider.update(metaInfo, metaData)

        except SQLAlchemyError as e: handle(e, metaData)

        # A handler may have relocated the content; keep the CDM in sync.
        if metaData.content != path:
            self.cdmArchive.republish(path, metaData.content)

        return self.getById(metaData.Id, scheme, thumbSize)

    # ----------------------------------------------------------------

    @app.populate
    def populateThumbnail(self):
        '''
        Populates the thumbnail for other resources.
        '''
        self.thumbnailManager.putThumbnail(self.thumbnailFormatId(),
                                           abspath(join(pythonPath(), 'resources', 'other.jpg')))

    # ----------------------------------------------------------------

    def metaTypeId(self):
        '''
        Provides the meta type id (resolved once, then cached).
        '''
        if self._metaTypeId is None: self._metaTypeId = metaTypeFor(self.session(), META_TYPE_KEY).Id
        return self._metaTypeId

    def thumbnailFormatId(self):
        '''
        Provides the thumbnail format id (resolved once, then cached).
        '''
        if not self._thumbnailFormatId: self._thumbnailFormatId = thumbnailFormatFor(self.session(), self.format_thumbnail).id
        return self._thumbnailFormatId

    def generateIdPath (self, id):
        # Buckets ids into three-digit directories to avoid huge flat folders.
        return '{0:03d}'.format((id // 1000) % 1000)
| agpl-3.0 |
ACS-Community/ACS | LGPL/CommonSoftware/acssim/test/acssimGenericTest.py | 4 | 1839 | #!/usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# @(#) $Id: acssimGenericTest.py,v 1.3 2006/03/16 00:03:02 dfugate Exp $
#------------------------------------------------------------------------------
'''
Quite possibly the most generic component client throughout ALMA software.
'''
from sys import argv
import ACS
from Acspy.Clients.SimpleClient import PySimpleClient
# argv[1] is the name of the component to activate; argv[2] is a Python
# expression evaluated against that component (e.g. "someMethod()").
compName = argv[1]
compMethod = argv[2]
print "Parameters to this generic test script are:", argv[1], argv[2]
# Make an instance of the PySimpleClient
simpleClient = PySimpleClient()
comp = simpleClient.getComponent(compName)
try:
    # NOTE(review): eval() on a command-line argument executes arbitrary code;
    # tolerable only because this is a test-only utility.
    joe = eval("comp." + argv[2])
    print "The evaluated return value is:", joe
except Exception, e:
    print "The exception that occured was:", e
# Release the component and disconnect whether or not the call succeeded
# (the except above swallows the failure, so control always reaches here).
simpleClient.releaseComponent(compName)
simpleClient.disconnect()
| lgpl-2.1 |
admcrae/tensorflow | tensorflow/contrib/quantization/python/array_ops.py | 178 | 1156 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
| apache-2.0 |
skosukhin/spack | lib/spack/spack/hooks/sbang.py | 1 | 4260 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import stat
import re
import llnl.util.tty as tty
import spack
import spack.modules
# Character limit for shebang line. Using Linux's 127 characters
# here, as it is the shortest I could find on a modern OS.
shebang_limit = 127
def shebang_too_long(path):
    """Detects whether a file has a shebang line that is too long.

    Returns False for anything that is not a regular file or that does not
    begin with the ``#!`` magic; otherwise compares the length of the full
    first line (magic included) against the module-level ``shebang_limit``.
    """
    if not os.path.isfile(path):
        return False

    with open(path, 'rb') as script:
        # Renamed from ``bytes`` to avoid shadowing the builtin of that name.
        magic = script.read(2)
        if magic != b'#!':
            return False
        first_line = magic + script.readline()

    return len(first_line) > shebang_limit
def filter_shebang(path):
    """Adds a second shebang line, using sbang, at the beginning of a file."""
    with open(path, 'r') as src:
        text = src.read()

    # This line will be prepended to the file.
    sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root

    # Skip files that are already using sbang.
    if text.startswith(sbang_line):
        return

    # The original shebang must become a comment in the target language so
    # the interpreter ignores it: --! for lua, //! for node.js. Newlines are
    # excluded from the detection patterns so a mention of "lua"/"node"
    # elsewhere in the file does not trigger a spurious match.
    for detect_pattern, comment_chars in ((r'^#!(/[^/\n]*)*lua\b', '--'),
                                          (r'^#!(/[^/\n]*)*node\b', '//')):
        if re.search(detect_pattern, text):
            text = re.sub(r'^#', comment_chars, text)

    # Temporarily make read-only files writable so we can rewrite them.
    previous_mode = None
    if not os.access(path, os.W_OK):
        previous_mode = os.stat(path).st_mode
        os.chmod(path, previous_mode | stat.S_IWRITE)

    with open(path, 'w') as dst:
        dst.write(sbang_line)
        dst.write(text)

    # Restore original permissions.
    if previous_mode is not None:
        os.chmod(path, previous_mode)

    tty.warn("Patched overlong shebang in %s" % path)
def filter_shebangs_in_directory(directory, filenames=None):
    """Run filter_shebang on every overlong-shebang file in a directory."""
    if filenames is None:
        filenames = os.listdir(directory)

    for entry in filenames:
        full_path = os.path.join(directory, entry)

        # Only regular files can carry a shebang.
        if not os.path.isfile(full_path):
            continue

        # Only handle links that resolve within THIS package's prefix.
        if os.path.islink(full_path):
            target = os.path.realpath(full_path)
            if not target.startswith(directory + os.sep):
                continue

        # Test the file for a long shebang, and filter.
        if shebang_too_long(full_path):
            filter_shebang(full_path)
def post_install(spec):
    """This hook edits scripts so that they call /bin/bash
    $spack_prefix/bin/sbang instead of something longer than the
    shebang limit.
    """
    # External packages are not managed by spack; leave them untouched.
    if spec.external:
        tty.debug('SKIP: shebang filtering [external package]')
        return

    for root, _, files in os.walk(spec.prefix):
        filter_shebangs_in_directory(root, files)
| lgpl-2.1 |
15Dkatz/pants | src/python/pants/backend/jvm/zinc/zinc_analysis.py | 9 | 2497 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
class ZincAnalysis(object):
  """Parsed representation of a zinc analysis.

  Note also that all files in keys/values are full-path, just as they appear in the analysis file.
  If you want paths relative to the build root or the classes dir or whatever, you must compute
  those yourself.
  """

  # First line of every serialized analysis file, emitted by write().
  FORMAT_VERSION_LINE = b'format version: 6\n'

  def __init__(self, compile_setup, relations, stamps, apis, source_infos, compilations):
    """Each argument is one parsed section of the analysis file."""
    (self.compile_setup, self.relations, self.stamps, self.apis, self.source_infos, self.compilations) = \
      (compile_setup, relations, stamps, apis, source_infos, compilations)

  def _sections(self):
    """The six analysis sections, in serialization order."""
    return (self.compile_setup, self.relations, self.stamps, self.apis,
            self.source_infos, self.compilations)

  def is_equal_to(self, other):
    """Structural comparison: delegates to each section's is_equal_to."""
    for self_element, other_element in zip(self._sections(), other._sections()):
      if not self_element.is_equal_to(other_element):
        return False
    return True

  def __eq__(self, other):
    # BUG FIX: __ne__ and __hash__ referenced __eq__, but __eq__ was never
    # defined, so == fell back to identity while __hash__ was structural —
    # breaking the eq/hash contract. Define __eq__ structurally, matching
    # is_equal_to.
    if not isinstance(other, ZincAnalysis):
      return NotImplemented
    return self.is_equal_to(other)

  def __ne__(self, other):
    result = self.__eq__(other)
    return result if result is NotImplemented else not result

  def __hash__(self):
    # NOTE(review): hashing assumes the sections are not mutated after
    # construction — confirm with callers.
    return hash(self._sections())

  def write_to_path(self, outfile_path):
    """Serialize this analysis to the file at outfile_path."""
    with open(outfile_path, 'wb') as outfile:
      self.write(outfile)

  def write(self, outfile):
    """Serialize this analysis to an open binary file-like object."""
    outfile.write(ZincAnalysis.FORMAT_VERSION_LINE)
    for section in self._sections():
      section.write(outfile)

  # Translate the contents of this analysis. Useful for creating anonymized test data.
  # Note that the resulting file is not a valid analysis, as the base64-encoded serialized objects
  # will be replaced with random base64 strings. So these are useful for testing analysis parsing,
  # but not for actually reading into Zinc.
  def translate(self, token_translator):
    for section in self._sections():
      section.translate(token_translator)
| apache-2.0 |
efortuna/AndroidSDKClone | ndk/prebuilt/linux-x86_64/lib/python2.7/encodings/cp1258.py | 593 | 13620 | """ Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1258 codec: each direction is one charmap table lookup."""
    def encode(self,input,errors='strict'):
        # encoding_table is presumably generated from decoding_table further
        # down this gencodec-produced module — confirm at the file's end.
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so `final` needs no special handling;
        # charmap_encode returns (bytes, length) — only the bytes are returned.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is stateless, so `final` needs no special handling;
        # charmap_decode returns (text, length) — only the text is returned.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no extra state is needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no extra state is needed.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registration entry for cp1258."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1258',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
u'\u0303' # 0xDE -> COMBINING TILDE
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\u0323' # 0xF2 -> COMBINING DOT BELOW
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
u'\u20ab' # 0xFE -> DONG SIGN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
# Inverse mapping (unicode -> byte) built once at import time from the
# ``decoding_table`` defined above, via the C-level charmap helper.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
appuio/ansible-role-openshift-zabbix-monitoring | vendor/openshift-tools/ansible/roles/lib_git/build/ansible/git_rebase.py | 13 | 1502 | # pylint: skip-file
def main():
    '''
    ansible git module for rebasing

    Builds the AnsibleModule argument spec, runs GitRebase.rebase() for
    state=present, and exits with changed/failed status accordingly.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str', choices=['present']),
            path=dict(default=None, required=True, type='str'),
            branch=dict(default=None, required=True, type='str'),
            rebase_branch=dict(default=None, required=True, type='str'),
            ssh_key=dict(default=None, required=False, type='str'),
        ),
        supports_check_mode=False,
    )
    git = GitRebase(module.params['path'],
                    module.params['branch'],
                    module.params['rebase_branch'],
                    module.params['ssh_key'])
    state = module.params['state']
    if state == 'present':
        results = git.rebase()
        if results['returncode'] != 0:
            module.fail_json(msg=results)
        # BUG FIX: dict.has_key() was removed in Python 3; the ``in``
        # operator is the portable, equivalent membership test.
        if 'no_rebase_needed' in results:
            module.exit_json(changed=False, results=results, state="present")
        module.exit_json(changed=True, results=results, state="present")
    # unreachable given choices=['present'], kept as a defensive fallback
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets.  These are required: Ansible substitutes the
# module_utils snippet in at the entry point via this wildcard import.
if __name__ == '__main__':
    from ansible.module_utils.basic import *
    main()
| apache-2.0 |
polyval/CNC | flask/Lib/site-packages/sqlalchemy/orm/query.py | 20 | 147616 | # orm/query.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative, InspectionAttr
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
# Public API of this module when star-imported.
__all__ = ['Query', 'QueryContext', 'aliased']

# Root of the loader path registry; serves as the default value for
# Query._current_path.
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
    """ORM-level SQL construction object.

    :class:`.Query` is the source of all SELECT statements generated by the
    ORM, both those formulated by end-user query operations as well as by
    high level internal operations such as related collection loading.  It
    features a generative interface whereby successive calls return a new
    :class:`.Query` object, a copy of the former with additional
    criteria and options associated with it.

    :class:`.Query` objects are normally initially generated using the
    :meth:`~.Session.query` method of :class:`.Session`.  For a full
    walkthrough of :class:`.Query` usage, see the
    :ref:`ormtutorial_toplevel`.

    """
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_suffixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
def __init__(self, entities, session=None):
    """Construct a :class:`.Query` against the given entities,
    optionally bound to a :class:`.Session`."""
    self.session = session
    # mapper/table -> ColumnAdapter map for with_polymorphic handling;
    # may be populated by _set_entities() below
    self._polymorphic_adapters = {}
    self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
    """Reset this Query's entity list from ``entities``.

    Each element is handed to ``entity_wrapper`` (``_QueryEntity`` by
    default), which registers itself on ``self._entities``; the
    selectables for the collected entities are then established.
    """
    wrapper = _QueryEntity if entity_wrapper is None else entity_wrapper
    self._entities = []
    self._primary_entity = None
    for entity in util.to_list(entities):
        wrapper(self, entity)
    self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
    """Resolve each entity to its (inspection, adapter) pair, caching in
    ``_mapper_adapter_map``, and hand the pair to ``setup_entity()``.
    """
    # copy-on-write: the map may be shared with the query this one was
    # cloned from
    self._mapper_adapter_map = d = self._mapper_adapter_map.copy()

    for ent in entities:
        for entity in ent.entities:
            if entity not in d:
                ext_info = inspect(entity)
                if not ext_info.is_aliased_class and \
                        ext_info.mapper.with_polymorphic:
                    # plain class mapped with with_polymorphic: install
                    # a polymorphic column adapter once per mapped table
                    if ext_info.mapper.mapped_table not in \
                            self._polymorphic_adapters:
                        self._mapper_loads_polymorphically_with(
                            ext_info.mapper,
                            sql_util.ColumnAdapter(
                                ext_info.selectable,
                                ext_info.mapper._equivalent_columns
                            )
                        )
                    aliased_adapter = None
                elif ext_info.is_aliased_class:
                    aliased_adapter = ext_info._adapter
                else:
                    aliased_adapter = None

                d[entity] = (
                    ext_info,
                    aliased_adapter
                )
            ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
    """Register ``adapter`` for every mapper in ``mapper``'s
    with_polymorphic set (or just ``mapper`` itself), keyed both by
    the mapper and by each local table up its inheritance chain."""
    adapters = self._polymorphic_adapters
    targets = mapper._with_polymorphic_mappers or [mapper]
    for sub_mapper in targets:
        adapters[sub_mapper] = adapter
        for ancestor in sub_mapper.iterate_to_root():
            adapters[ancestor.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
    """Establish the FROM objects of this Query from ``obj``.

    When ``set_base_alias`` is True (select_entity_from() behavior) the
    single selectable becomes an alias that ORM expressions are adapted
    against via ``_from_obj_alias``.
    """
    fa = []
    select_from_alias = None

    for from_obj in obj:
        info = inspect(from_obj)
        if hasattr(info, 'mapper') and \
                (info.is_mapper or info.is_aliased_class):
            # a mapped entity; remember it as the select-from entity
            self._select_from_entity = from_obj
            if set_base_alias:
                raise sa_exc.ArgumentError(
                    "A selectable (FromClause) instance is "
                    "expected when the base alias is being set.")
            fa.append(info.selectable)
        elif not info.is_selectable:
            raise sa_exc.ArgumentError(
                "argument is not a mapped class, mapper, "
                "aliased(), or FromClause instance.")
        else:
            if isinstance(from_obj, expression.SelectBase):
                # a bare SELECT must be aliased to appear in a FROM list
                from_obj = from_obj.alias()
            if set_base_alias:
                select_from_alias = from_obj
            fa.append(from_obj)

    self._from_obj = tuple(fa)

    if set_base_alias and \
            len(self._from_obj) == 1 and \
            isinstance(select_from_alias, expression.Alias):
        # adapt all equivalent columns onto the single aliased selectable
        equivs = self.__all_equivs()
        self._from_obj_alias = sql_util.ColumnAdapter(
            self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
    """Discard any polymorphic adapters previously installed for
    ``mapper``'s with_polymorphic set, including per-table entries."""
    pop = self._polymorphic_adapters.pop
    for sub_mapper in mapper._with_polymorphic_mappers:
        pop(sub_mapper, None)
        for ancestor in sub_mapper.iterate_to_root():
            pop(ancestor.local_table, None)
def _adapt_polymorphic_element(self, element):
    """Adapt ``element`` through a registered polymorphic adapter,
    returning the adapted clause, or None when no adapter applies."""
    adapters = self._polymorphic_adapters

    # first preference: the parent entity annotated on the element
    if "parententity" in element._annotations:
        adapter = adapters.get(
            element._annotations['parententity'], None)
        if adapter:
            return adapter.adapt_clause(element)

    # otherwise key on the element's FROM clause or owning table
    if isinstance(element, expression.FromClause):
        key = element
    elif hasattr(element, 'table'):
        key = element.table
    else:
        return None

    adapter = adapters.get(key, None)
    if adapter:
        return adapter.adapt_clause(element)
def _adapt_col_list(self, cols):
    """Return ``cols`` with each element coerced to a label reference
    and adapted against this query's current transformations."""
    adapted = []
    for col in cols:
        ref = expression._literal_as_label_reference(col)
        adapted.append(self._adapt_clause(ref, True, True))
    return adapted
@_generative()
def _adapt_all_clauses(self):
    # generative: subsequent adaptation applies to *all* expression
    # elements, not only ORM-annotated ones
    self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
    """Adapt incoming clauses to transformations which
    have been applied within this query.

    :param clause: expression element to adapt.
    :param as_filter: also apply the ``_filter_aliases`` adapters
     when present.
    :param orm_only: adapt only elements carrying ORM annotations
     (ignored when ``_orm_only_adapt`` has been disabled).
    """
    adapters = []
    # do we adapt all expression elements or only those
    # tagged as 'ORM' constructs ?
    if not self._orm_only_adapt:
        orm_only = False

    if as_filter and self._filter_aliases:
        for fa in self._filter_aliases._visitor_iterator:
            adapters.append(
                (
                    orm_only, fa.replace
                )
            )

    if self._from_obj_alias:
        # for the "from obj" alias, apply extra rule to the
        # 'ORM only' check, if this query were generated from a
        # subquery of itself, i.e. _from_selectable(), apply adaption
        # to all SQL constructs.
        adapters.append(
            (
                orm_only if self._orm_only_from_obj_alias else False,
                self._from_obj_alias.replace
            )
        )

    if self._polymorphic_adapters:
        adapters.append(
            (
                orm_only, self._adapt_polymorphic_element
            )
        )

    if not adapters:
        # nothing registered; clause passes through unchanged
        return clause

    def replace(elem):
        # first adapter producing a non-None result wins
        for _orm_only, adapter in adapters:
            # if 'orm only', look for ORM annotations
            # in the element before adapting.
            if not _orm_only or \
                    '_orm_adapt' in elem._annotations or \
                    "parententity" in elem._annotations:
                e = adapter(elem)
                if e is not None:
                    return e

    return visitors.replacement_traverse(
        clause,
        {},
        replace
    )
def _entity_zero(self):
    # the first _QueryEntity of this Query
    return self._entities[0]

def _mapper_zero(self):
    # TODO: self._select_from_entity is not a mapper
    # so this method is misnamed
    return self._select_from_entity \
        if self._select_from_entity is not None \
        else self._entity_zero().entity_zero

@property
def _mapper_entities(self):
    # generator over only the _MapperEntity members of _entities
    for ent in self._entities:
        if isinstance(ent, _MapperEntity):
            yield ent

def _joinpoint_zero(self):
    # the current join target, defaulting to the zero "mapper"
    return self._joinpoint.get(
        '_joinpoint_entity',
        self._mapper_zero()
    )

def _bind_mapper(self):
    # mapper used to locate an engine bind; None when the zero entity
    # is a plain clause element
    ezero = self._mapper_zero()
    if ezero is not None:
        insp = inspect(ezero)
        if not insp.is_clause_element:
            return insp.mapper

    return None
def _only_mapper_zero(self, rationale=None):
    # return the zero "mapper", asserting exactly one entity is present
    if len(self._entities) > 1:
        raise sa_exc.InvalidRequestError(
            rationale or
            "This operation requires a Query "
            "against a single mapper."
        )
    return self._mapper_zero()

def _only_full_mapper_zero(self, methname):
    # stricter variant: requires the sole entity to be the full
    # primary entity (a complete mapped class)
    if self._entities != [self._primary_entity]:
        raise sa_exc.InvalidRequestError(
            "%s() can only be used against "
            "a single mapped class." % methname)
    return self._primary_entity.entity_zero

def _only_entity_zero(self, rationale=None):
    # return the zero _QueryEntity, asserting exactly one is present
    if len(self._entities) > 1:
        raise sa_exc.InvalidRequestError(
            rationale or
            "This operation requires a Query "
            "against a single mapper."
        )
    return self._entity_zero()

def __all_equivs(self):
    # merged equivalent-column map across all mapper entities
    equivs = {}
    for ent in self._mapper_entities:
        equivs.update(ent.mapper._equivalent_columns)
    return equivs
def _get_condition(self):
    # strict "no criterion" check-and-reset used by get()
    return self._no_criterion_condition(
        "get", order_by=False, distinct=False)

def _get_existing_condition(self):
    # assertion-only variant used when an identity-map hit was found
    self._no_criterion_assertion("get", order_by=False, distinct=False)

def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
    # raise if this Query already carries WHERE criterion, an explicit
    # statement, FROM objects, LIMIT/OFFSET, GROUP BY, or (optionally)
    # ORDER BY / DISTINCT state
    if not self._enable_assertions:
        return
    if self._criterion is not None or \
            self._statement is not None or self._from_obj or \
            self._limit is not None or self._offset is not None or \
            self._group_by or (order_by and self._order_by) or \
            (distinct and self._distinct):
        raise sa_exc.InvalidRequestError(
            "Query.%s() being called on a "
            "Query with existing criterion. " % meth)

def _no_criterion_condition(self, meth, order_by=True, distinct=True):
    # assert, then reset all criterion-related state to defaults
    self._no_criterion_assertion(meth, order_by, distinct)

    self._from_obj = ()
    self._statement = self._criterion = None
    self._order_by = self._group_by = self._distinct = False

def _no_clauseelement_condition(self, meth):
    # like _no_criterion_condition, but additionally rejects an
    # existing ORDER BY
    if not self._enable_assertions:
        return
    if self._order_by:
        raise sa_exc.InvalidRequestError(
            "Query.%s() being called on a "
            "Query with existing criterion. " % meth)
    self._no_criterion_condition(meth)

def _no_statement_condition(self, meth):
    # reject criterion methods once from_statement() has been applied
    if not self._enable_assertions:
        return
    if self._statement is not None:
        raise sa_exc.InvalidRequestError(
            ("Query.%s() being called on a Query with an existing full "
             "statement - can't apply criterion.") % meth)

def _no_limit_offset(self, meth):
    # reject methods that cannot follow LIMIT/OFFSET
    if not self._enable_assertions:
        return
    if self._limit is not None or self._offset is not None:
        raise sa_exc.InvalidRequestError(
            "Query.%s() being called on a Query which already has LIMIT "
            "or OFFSET applied. To modify the row-limited results of a "
            " Query, call from_self() first. "
            "Otherwise, call %s() before limit() or offset() "
            "are applied."
            % (meth, meth)
        )
def _get_options(self, populate_existing=None,
                 version_check=None,
                 only_load_props=None,
                 refresh_state=None):
    """Apply loading options in place (non-generative) and return
    ``self``.

    Falsy arguments leave the corresponding attribute untouched;
    ``only_load_props`` is coerced to a set when given.
    """
    if populate_existing:
        self._populate_existing = populate_existing
    if version_check:
        self._version_check = version_check
    if only_load_props:
        self._only_load_props = set(only_load_props)
    if refresh_state:
        self._refresh_state = refresh_state

    return self
def _clone(self):
    """Produce a shallow copy of this Query, sharing no __dict__ with
    the original; __init__ is deliberately bypassed."""
    copied = self.__class__.__new__(self.__class__)
    copied.__dict__ = dict(self.__dict__)
    return copied
@property
def statement(self):
    """The full SELECT statement represented by this Query.

    The statement by default will not have disambiguating labels
    applied to the construct unless with_labels(True) is called
    first.

    """
    stmt = self._compile_context(labels=self._with_labels).\
        statement
    if self._params:
        # fold in any bound parameter values set via params()
        stmt = stmt.params(self._params)

    # TODO: there's no tests covering effects of
    # the annotation not being there
    return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
    """Return this query's full SELECT embedded in an :class:`.Alias`.

    Eager JOIN generation is disabled within the embedded statement.

    :param name: string name for the alias, passed through to
     :meth:`.FromClause.alias`; generated deterministically at compile
     time when ``None``.

    :param with_labels: if True, apply table-qualified labels to all
     columns via :meth:`.with_labels` first.

    :param reduce_columns: if True, run
     :meth:`.Select.reduce_columns` on the resulting construct to
     remove same-named columns equivalent via foreign key or WHERE
     clause equivalence.

    """
    query = self.enable_eagerloads(False)
    if with_labels:
        query = query.with_labels()
    stmt = query.statement
    if reduce_columns:
        stmt = stmt.reduce_columns()
    return stmt.alias(name=name)
def cte(self, name=None, recursive=False):
    """Return this query's full SELECT represented as a common table
    expression (CTE).

    Parameters and usage match those of :meth:`.SelectBase.cte`; see
    that method for details, including the Postgresql
    ``WITH RECURSIVE`` example in the SQLAlchemy documentation.  Note
    that a CTE produced here is a Core selectable, so its columns are
    accessed via the ``.c.`` attribute; wrap it with
    :func:`.orm.aliased` against a mapped entity to regain
    column-mapped attribute access.

    .. versionadded:: 0.7.6

    .. seealso::

        :meth:`.SelectBase.cte`

    """
    stmt = self.enable_eagerloads(False).statement
    return stmt.cte(name=name, recursive=recursive)
def label(self, name):
    """Return this query's full SELECT converted to a scalar
    subquery labeled with the given name.

    Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.

    .. versionadded:: 0.6.5

    """
    stmt = self.enable_eagerloads(False).statement
    return stmt.label(name)
def as_scalar(self):
    """Return this query's full SELECT converted to a scalar
    subquery.

    Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.

    .. versionadded:: 0.6.5

    """
    stmt = self.enable_eagerloads(False).statement
    return stmt.as_scalar()
@property
def selectable(self):
    """Return the :class:`.Select` object emitted by this :class:`.Query`.

    Used for :func:`.inspect` compatibility, this is equivalent to::

        query.enable_eagerloads(False).with_labels().statement

    """
    return self.__clause_element__()

def __clause_element__(self):
    # lets a Query be used directly where a SQL expression element
    # is expected
    return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
    """Generatively control whether eager joins and subqueries are
    rendered.

    When set to False, the returned Query ignores
    :func:`~sqlalchemy.orm.joinedload` /
    :func:`~sqlalchemy.orm.subqueryload` options as well as
    mapper-level ``lazy='joined'`` / ``lazy='subquery'``
    configurations.  Used primarily when nesting this Query's
    statement into a subquery or other selectable, or when using
    :meth:`.Query.yield_per`.

    """
    self._enable_eagerloads = value
def _no_yield_per(self, message):
    # uniform error raised when yield_per is combined with an
    # incompatible eager-loading strategy; ``message`` names the
    # strategy for the error text
    raise sa_exc.InvalidRequestError(
        "The yield_per Query option is currently not "
        "compatible with %s eager loading. Please "
        "specify lazyload('*') or query.enable_eagerloads(False) in "
        "order to "
        "proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
    """Generatively apply ``<tablename>_<columnname>`` labels to the
    SELECT returned by :attr:`.Query.statement`.

    This disambiguates same-named columns originating from multiple
    tables.  When the Query itself issues SQL to load rows it always
    uses column labeling regardless of this flag.

    .. note:: This method *only* affects the output of
       :attr:`.Query.statement`, not the result-row invoking methods
       such as :meth:`.Query.first` or :meth:`.Query.all`.  To execute
       the labeled statement, pass it to :meth:`.Session.execute`::

            result = session.execute(query.with_labels().statement)

    """
    self._with_labels = True
@_generative()
def enable_assertions(self, value):
    """Generatively control whether state assertions are performed.

    When set to False, the returned Query skips checks such as "no
    LIMIT/OFFSET applied" before filter(), "no criterion present"
    before get(), and "no from_statement()" before
    filter()/order_by()/group_by().  This permissive mode exists for
    custom Query subclasses that apply criterion outside the usual
    patterns; callers must ensure the resulting usage remains
    possible — e.g. a statement applied by from_statement() will
    still override any criterion set by filter() or order_by().

    """
    self._enable_assertions = value
@property
def whereclause(self):
    """A readonly attribute which returns the current WHERE criterion for
    this Query.

    This returned value is a SQL expression construct, or ``None`` if no
    criterion has been established.

    """
    return self._criterion
@_generative()
def _with_current_path(self, path):
    """indicate that this query applies to objects loaded
    within a certain path.

    Used by deferred loaders (see strategies.py) which transfer
    query options from an originating query to a newly generated
    query intended for the deferred load.

    """
    self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
                     cls_or_mappers,
                     selectable=None,
                     polymorphic_on=None):
    """Load columns for inheriting classes.

    :meth:`.Query.with_polymorphic` applies transformations
    to the "main" mapped class represented by this :class:`.Query`.
    The "main" mapped class here means the :class:`.Query`
    object's first argument is a full class, i.e.
    ``session.query(SomeClass)``. These transformations allow additional
    tables to be present in the FROM clause so that columns for a
    joined-inheritance subclass are available in the query, both for the
    purposes of load-time efficiency as well as the ability to use
    these columns at query time.

    See the documentation section :ref:`with_polymorphic` for
    details on how this method is used.

    .. versionchanged:: 0.8
        A new and more flexible function
        :func:`.orm.with_polymorphic` supersedes
        :meth:`.Query.with_polymorphic`, as it can apply the equivalent
        functionality to any set of columns or classes in the
        :class:`.Query`, not just the "zero mapper".  See that
        function for a description of arguments.

    """
    if not self._primary_entity:
        raise sa_exc.InvalidRequestError(
            "No primary mapper set up for this Query.")
    # clone the primary entity so the polymorphic settings land on a
    # fresh copy, leaving the originating query's entity untouched
    entity = self._entities[0]._clone()
    self._entities = [entity] + self._entities[1:]
    entity.set_with_polymorphic(self,
                                cls_or_mappers,
                                selectable=selectable,
                                polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
    """Generatively yield only ``count`` rows at a time.

    Intended for very large result sets (> 10K rows): results are
    batched into sub-collections and yielded out partially, so the
    interpreter avoids allocating very large areas of memory.
    Sets the ``stream_results`` execution option to True and
    ``max_row_buffer`` to ``count``; currently only the
    :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect honors
    ``stream_results`` (via server-side cursors) — other DBAPIs
    pre-buffer all rows before making them available.

    .. warning::

        This method is not compatible with most eager loading
        schemes, including subqueryload and joinedload with
        collections: if the same instance appears in more than one
        batch of rows, end-user attribute changes may be
        overwritten, and eagerly loaded collections are cleared on a
        subsequent result batch.  Disable eager loads
        unconditionally with :meth:`.Query.enable_eagerloads`::

            q = sess.query(Object).yield_per(100).enable_eagerloads(False)

        or selectively with :func:`.lazyload`::

            q = sess.query(Object).yield_per(100).\\
                options(lazyload('*'), joinedload(Object.some_related))

    .. seealso::

        :meth:`.Query.enable_eagerloads`

    """
    self._yield_per = count
    stream_opts = {"stream_results": True,
                   "max_row_buffer": count}
    self._execution_options = self._execution_options.union(stream_opts)
def get(self, ident):
    """Return an instance based on the given primary key identifier,
    or ``None`` if not found.

    E.g.::

        my_user = session.query(User).get(5)

        some_object = session.query(VersionedFoo).get((5, 10))

    :meth:`~.Query.get` consults the identity map of the owning
    :class:`.Session` first: when the identifier is present there and
    the object is not fully expired, it is returned directly with no
    SQL emitted.  Otherwise — or when the locally-present object is
    marked expired — a SELECT is performed to locate or refresh the
    object, raising :class:`~sqlalchemy.orm.exc.ObjectDeletedError`
    if the row is no longer present.

    The originating :class:`.Query` must target a single mapped
    entity with no additional filtering criterion; loading options
    via :meth:`~.Query.options` may be applied and are used when the
    object is not yet locally present.  A lazy-loading many-to-one
    :func:`.relationship` with simple foreign-key-to-primary-key
    criterion performs an equivalent identity-map lookup; see
    :doc:`/orm/loading_relationships`.

    :param ident: A scalar or tuple value representing the primary
     key.  For a composite primary key the order of identifiers
     corresponds in most cases to the mapped :class:`.Table` object's
     primary key columns, or, for a :func:`.mapper` constructed with
     the ``primary key`` argument, to the elements of that
     collection.

    :return: The object instance, or ``None``.

    """
    return self._get_impl(ident, loading.load_on_ident)
def _get_impl(self, ident, fallback_fn):
    """Shared implementation for get(): try the identity map, falling
    back to ``fallback_fn(self, key)`` (a loader strategy) when a
    database round trip is required.
    """
    # convert composite types to individual args
    if hasattr(ident, '__composite_values__'):
        ident = ident.__composite_values__()

    ident = util.to_list(ident)

    mapper = self._only_full_mapper_zero("get")

    if len(ident) != len(mapper.primary_key):
        raise sa_exc.InvalidRequestError(
            "Incorrect number of values in identifier to formulate "
            "primary key for query.get(); primary key columns are %s" %
            ','.join("'%s'" % c for c in mapper.primary_key))

    key = mapper.identity_key_from_primary_key(ident)

    if not self._populate_existing and \
            not mapper.always_refresh and \
            self._for_update_arg is None:

        # eligible to satisfy from the identity map without SQL
        instance = loading.get_from_identity(
            self.session, key, attributes.PASSIVE_OFF)
        if instance is not None:
            self._get_existing_condition()
            # reject calls for id in identity map but class
            # mismatch.
            if not issubclass(instance.__class__, mapper.class_):
                return None
            return instance

    return fallback_fn(self, key)
@_generative()
def correlate(self, *args):
    """Generatively correlate the given FROM clauses to those of an
    enclosing :class:`.Query` or :func:`~.expression.select`.

    Mapped classes, :func:`.aliased` constructs, and :func:`.mapper`
    constructs are accepted in addition to plain expression
    constructs; all are coerced and ultimately passed to
    :meth:`.Select.correlate`.

    The correlation takes effect in such cases as when
    :meth:`.Query.from_self` is used, or when a subquery returned by
    :meth:`.Query.subquery` is embedded in another
    :func:`~.expression.select` construct.

    """
    coerced = set(
        _interpret_as_from(arg)
        if arg is not None else None
        for arg in args)
    self._correlate = self._correlate.union(coerced)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper_zero = inspect(self._mapper_zero()).mapper
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is mapper_zero:
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
    def from_self(self, *entities):
        """return a Query that selects from this Query's
        SELECT statement.

        :meth:`.Query.from_self` essentially turns the SELECT statement
        into a SELECT of itself.  Given a query such as::

            q = session.query(User).filter(User.name.like('e%'))

        Given the :meth:`.Query.from_self` version::

            q = session.query(User).filter(User.name.like('e%')).from_self()

        This query renders as:

        .. sourcecode:: sql

            SELECT anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1) AS anon_1

        There are lots of cases where :meth:`.Query.from_self` may be useful.
        A simple one is where above, we may want to apply a row LIMIT to
        the set of user objects we query against, and then apply additional
        joins against that row-limited set::

            q = session.query(User).filter(User.name.like('e%')).\\
                limit(5).from_self().\\
                join(User.addresses).filter(Address.email.like('q%'))

        The above query joins to the ``Address`` entity but only against the
        first five results of the ``User`` query:

        .. sourcecode:: sql

            SELECT anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1
            LIMIT :param_1) AS anon_1
            JOIN address ON anon_1.user_id = address.user_id
            WHERE address.email LIKE :email_1

        **Automatic Aliasing**

        Another key behavior of :meth:`.Query.from_self` is that it applies
        **automatic aliasing** to the entities inside the subquery, when
        they are referenced on the outside.  Above, if we continue to
        refer to the ``User`` entity without any additional aliasing applied
        to it, those references will be in terms of the subquery::

            q = session.query(User).filter(User.name.like('e%')).\\
                limit(5).from_self().\\
                join(User.addresses).filter(Address.email.like('q%')).\\
                order_by(User.name)

        The ORDER BY against ``User.name`` is aliased to be in terms of the
        inner subquery:

        .. sourcecode:: sql

            SELECT anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1
            LIMIT :param_1) AS anon_1
            JOIN address ON anon_1.user_id = address.user_id
            WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name

        The automatic aliasing feature only works in a **limited** way,
        for simple filters and orderings.   More ambitious constructions
        such as referring to the entity in joins should prefer to use
        explicit subquery objects, typically making use of the
        :meth:`.Query.subquery` method to produce an explicit subquery object.
        Always test the structure of queries by viewing the SQL to ensure
        a particular structure does what's expected!

        **Changing the Entities**

        :meth:`.Query.from_self` also includes the ability to modify what
        columns are being queried.   In our example, we want ``User.id``
        to be queried by the inner query, so that we can join to the
        ``Address`` entity on the outside, but we only wanted the outer
        query to return the ``Address.email`` column::

            q = session.query(User).filter(User.name.like('e%')).\\
                limit(5).from_self(Address.email).\\
                join(User.addresses).filter(Address.email.like('q%'))

        yielding:

        .. sourcecode:: sql

            SELECT address.email AS address_email
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1
            LIMIT :param_1) AS anon_1
            JOIN address ON anon_1.user_id = address.user_id
            WHERE address.email LIKE :email_1

        **Looking out for Inner / Outer Columns**

        Keep in mind that when referring to columns that originate from
        inside the subquery, we need to ensure they are present in the
        columns clause of the subquery itself; this is an ordinary aspect of
        SQL.  For example, if we wanted to load from a joined entity inside
        the subquery using :func:`.contains_eager`, we need to add those
        columns.   Below illustrates a join of ``Address`` to ``User``,
        then a subquery, and then we'd like :func:`.contains_eager` to access
        the ``User`` columns::

            q = session.query(Address).join(Address.user).\\
                filter(User.name.like('e%'))

            q = q.add_entity(User).from_self().\\
                options(contains_eager(Address.user))

        We use :meth:`.Query.add_entity` above **before** we call
        :meth:`.Query.from_self` so that the ``User`` columns are present
        in the inner subquery, so that they are available to the
        :func:`.contains_eager` modifier we are using on the outside,
        producing:

        .. sourcecode:: sql

            SELECT anon_1.address_id AS anon_1_address_id,
                   anon_1.address_email AS anon_1_address_email,
                   anon_1.address_user_id AS anon_1_address_user_id,
                   anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (
                SELECT address.id AS address_id,
                address.email AS address_email,
                address.user_id AS address_user_id,
                "user".id AS user_id,
                "user".name AS user_name
                FROM address JOIN "user" ON "user".id = address.user_id
                WHERE "user".name LIKE :name_1) AS anon_1

        If we didn't call ``add_entity(User)``, but still asked
        :func:`.contains_eager` to load the ``User`` entity, it would be
        forced to add the table on the outside without the correct
        join criteria - note the ``anon1, "user"`` phrase at
        the end:

        .. sourcecode:: sql

            -- incorrect query
            SELECT anon_1.address_id AS anon_1_address_id,
                   anon_1.address_email AS anon_1_address_email,
                   anon_1.address_user_id AS anon_1_address_user_id,
                   "user".id AS user_id,
                   "user".name AS user_name
            FROM (
                SELECT address.id AS address_id,
                address.email AS address_email,
                address.user_id AS address_user_id
                FROM address JOIN "user" ON "user".id = address.user_id
                WHERE "user".name LIKE :name_1) AS anon_1, "user"

        :param \*entities: optional list of entities which will replace
         those being selected.
        """
        # render this query as a labeled, eagerload-free SELECT with no
        # outer correlation, then make that the sole FROM of a new query
        fromclause = self.with_labels().enable_eagerloads(False).\
            statement.correlate(None)
        q = self._from_selectable(fromclause)
        q._enable_single_crit = False
        q._select_from_entity = self._mapper_zero()
        if entities:
            q._set_entities(entities)
        return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
    @_generative()
    def _from_selectable(self, fromclause):
        """Make *fromclause* the sole FROM object of this query,
        re-adapting all existing entities to select from it.

        Used by :meth:`.Query.from_self` and the set-operation methods
        (``union()`` etc.) to wrap this query's SELECT as a subquery.
        """
        # drop criterion/modifier state that applied to the original
        # FROM list; that state is now baked into ``fromclause``
        for attr in (
                '_statement', '_criterion',
                '_order_by', '_group_by',
                '_limit', '_offset',
                '_joinpath', '_joinpoint',
                '_distinct', '_having',
                '_prefixes', '_suffixes'
        ):
            self.__dict__.pop(attr, None)
        self._set_select_from([fromclause], True)
        # this enables clause adaptation for non-ORM
        # expressions.
        self._orm_only_from_obj_alias = False
        # rebuild each entity so it selects from the new fromclause
        # rather than from its original table(s)
        old_entities = self._entities
        self._entities = []
        for e in old_entities:
            e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading_relationships` for reference
documentation.
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
    @_generative(_no_statement_condition, _no_limit_offset)
    def order_by(self, *criterion):
        """apply one or more ORDER BY criterion to the query and return
        the newly resulting ``Query``

        All existing ORDER BY settings can be suppressed by
        passing ``None`` - this will suppress any ORDER BY configured
        on mappers as well.

        Alternatively, an existing ORDER BY setting on the Query
        object can be entirely cancelled by passing ``False``
        as the value - use this before calling methods where
        an ORDER BY is invalid.
        """
        if len(criterion) == 1:
            # ``False``: remove any ORDER BY set directly on this query,
            # restoring the class-level / mapper-level default
            if criterion[0] is False:
                if '_order_by' in self.__dict__:
                    del self._order_by
                return
            # ``None``: explicitly record "no ORDER BY at all",
            # overriding mapper-configured orderings too
            if criterion[0] is None:
                self._order_by = None
                return
        criterion = self._adapt_col_list(criterion)
        # append to any existing ORDER BY unless none is in effect
        if self._order_by is False or self._order_by is None:
            self._order_by = criterion
        else:
            self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
    def join(self, *props, **kwargs):
        """Create a SQL JOIN against this :class:`.Query` object's criterion
        and apply generatively, returning the newly resulting :class:`.Query`.

        **Simple Relationship Joins**

        Consider a mapping between two classes ``User`` and ``Address``,
        with a relationship ``User.addresses`` representing a collection
        of ``Address`` objects associated with each ``User``.   The most
        common usage of :meth:`~.Query.join` is to create a JOIN along this
        relationship, using the ``User.addresses`` attribute as an indicator
        for how this should occur::

            q = session.query(User).join(User.addresses)

        Where above, the call to :meth:`~.Query.join` along ``User.addresses``
        will result in SQL equivalent to::

            SELECT user.* FROM user JOIN address ON user.id = address.user_id

        In the above example we refer to ``User.addresses`` as passed to
        :meth:`~.Query.join` as the *on clause*, that is, it indicates
        how the "ON" portion of the JOIN should be constructed.  For a
        single-entity query such as the one above (i.e. we start by selecting
        only from ``User`` and nothing else), the relationship can also be
        specified by its string name::

            q = session.query(User).join("addresses")

        :meth:`~.Query.join` can also accommodate multiple
        "on clause" arguments to produce a chain of joins, such as below
        where a join across four related entities is constructed::

            q = session.query(User).join("orders", "items", "keywords")

        The above would be shorthand for three separate calls to
        :meth:`~.Query.join`, each using an explicit attribute to indicate
        the source entity::

            q = session.query(User).\\
                join(User.orders).\\
                join(Order.items).\\
                join(Item.keywords)

        **Joins to a Target Entity or Selectable**

        A second form of :meth:`~.Query.join` allows any mapped entity
        or core selectable construct as a target.   In this usage,
        :meth:`~.Query.join` will attempt
        to create a JOIN along the natural foreign key relationship between
        two entities::

            q = session.query(User).join(Address)

        The above calling form of :meth:`~.Query.join` will raise an error if
        either there are no foreign keys between the two entities, or if
        there are multiple foreign key linkages between them.   In the
        above calling form, :meth:`~.Query.join` is called upon to
        create the "on clause" automatically for us.  The target can
        be any mapped entity or selectable, such as a :class:`.Table`::

            q = session.query(User).join(addresses_table)

        **Joins to a Target with an ON Clause**

        The third calling form allows both the target entity as well
        as the ON clause to be passed explicitly.   Suppose for
        example we wanted to join to ``Address`` twice, using
        an alias the second time.  We use :func:`~sqlalchemy.orm.aliased`
        to create a distinct alias of ``Address``, and join
        to it using the ``target, onclause`` form, so that the
        alias can be specified explicitly as the target along with
        the relationship to instruct how the ON clause should proceed::

            a_alias = aliased(Address)

            q = session.query(User).\\
                join(User.addresses).\\
                join(a_alias, User.addresses).\\
                filter(Address.email_address=='ed@foo.com').\\
                filter(a_alias.email_address=='ed@bar.com')

        Where above, the generated SQL would be similar to::

            SELECT user.* FROM user
                JOIN address ON user.id = address.user_id
                JOIN address AS address_1 ON user.id=address_1.user_id
                WHERE address.email_address = :email_address_1
                AND address_1.email_address = :email_address_2

        The two-argument calling form of :meth:`~.Query.join`
        also allows us to construct arbitrary joins with SQL-oriented
        "on clause" expressions, not relying upon configured relationships
        at all.  Any SQL expression can be passed as the ON clause
        when using the two-argument form, which should refer to the target
        entity in some way as well as an applicable source entity::

            q = session.query(User).join(Address, User.id==Address.user_id)

        .. versionchanged:: 0.7
            In SQLAlchemy 0.6 and earlier, the two argument form of
            :meth:`~.Query.join` requires the usage of a tuple:
            ``query(User).join((Address, User.id==Address.user_id))``\ .
            This calling form is accepted in 0.7 and further, though
            is not necessary unless multiple join conditions are passed to
            a single :meth:`~.Query.join` call, which itself is also not
            generally necessary as it is now equivalent to multiple
            calls (this wasn't always the case).

        **Advanced Join Targeting and Adaption**

        There is a lot of flexibility in what the "target" can be when using
        :meth:`~.Query.join`.   As noted previously, it also accepts
        :class:`.Table` constructs and other selectables such as
        :func:`.alias` and :func:`.select` constructs, with either the one
        or two-argument forms::

            addresses_q = select([Address.user_id]).\\
                where(Address.email_address.endswith("@bar.com")).\\
                alias()

            q = session.query(User).\\
                join(addresses_q, addresses_q.c.user_id==User.id)

        :meth:`~.Query.join` also features the ability to *adapt* a
        :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
        selectable.  Below we construct a JOIN from ``User`` to a subquery
        against ``Address``, allowing the relationship denoted by
        ``User.addresses`` to *adapt* itself to the altered target::

            address_subq = session.query(Address).\\
                filter(Address.email_address == 'ed@foo.com').\\
                subquery()

            q = session.query(User).join(address_subq, User.addresses)

        Producing SQL similar to::

            SELECT user.* FROM user
                JOIN (
                    SELECT address.id AS id,
                    address.user_id AS user_id,
                    address.email_address AS email_address
                    FROM address
                    WHERE address.email_address = :email_address_1
                ) AS anon_1 ON user.id = anon_1.user_id

        The above form allows one to fall back onto an explicit ON
        clause at any time::

            q = session.query(User).\\
                join(address_subq, User.id==address_subq.c.user_id)

        **Controlling what to Join From**

        While :meth:`~.Query.join` exclusively deals with the "right"
        side of the JOIN, we can also control the "left" side, in those
        cases where it's needed, using :meth:`~.Query.select_from`.
        Below we construct a query against ``Address`` but can still
        make usage of ``User.addresses`` as our ON clause by instructing
        the :class:`.Query` to select first from the ``User``
        entity::

            q = session.query(Address).select_from(User).\\
                join(User.addresses).\\
                filter(User.name == 'ed')

        Which will produce SQL similar to::

            SELECT address.* FROM user
                JOIN address ON user.id=address.user_id
                WHERE user.name = :name_1

        **Constructing Aliases Anonymously**

        :meth:`~.Query.join` can construct anonymous aliases
        using the ``aliased=True`` flag.  This feature is useful
        when a query is being joined algorithmically, such as
        when querying self-referentially to an arbitrary depth::

            q = session.query(Node).\\
                join("children", "children", aliased=True)

        When ``aliased=True`` is used, the actual "alias" construct
        is not explicitly available.  To work with it, methods such as
        :meth:`.Query.filter` will adapt the incoming entity to
        the last join point::

            q = session.query(Node).\\
                join("children", "children", aliased=True).\\
                filter(Node.name == 'grandchild 1')

        When using automatic aliasing, the ``from_joinpoint=True``
        argument can allow a multi-node join to be broken into
        multiple calls to :meth:`~.Query.join`, so that
        each path along the way can be further filtered::

            q = session.query(Node).\\
                join("children", aliased=True).\\
                filter(Node.name == 'child 1').\\
                join("children", aliased=True, from_joinpoint=True).\\
                filter(Node.name == 'grandchild 1')

        The filtering aliases above can then be reset back to the
        original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::

            q = session.query(Node).\\
                join("children", "children", aliased=True).\\
                filter(Node.name == 'grandchild 1').\\
                reset_joinpoint().\\
                filter(Node.name == 'parent 1')

        For an example of ``aliased=True``, see the distribution
        example :ref:`examples_xmlpersistence` which illustrates
        an XPath-like query system using algorithmic joins.

        :param \*props: A collection of one or more join conditions,
         each consisting of a relationship-bound attribute or string
         relationship name representing an "on clause", or a single
         target entity, or a tuple in the form of ``(target, onclause)``.
         A special two-argument calling form of the form ``target, onclause``
         is also accepted.
        :param aliased=False: If True, indicate that the JOIN target should be
         anonymously aliased.  Subsequent calls to :meth:`~.Query.filter`
         and similar will adapt the incoming criterion to the target
         alias, until :meth:`~.Query.reset_joinpoint` is called.
        :param isouter=False: If True, the join used will be a left outer join,
         just as if the :meth:`.Query.outerjoin` method were called.  This
         flag is here to maintain consistency with the same flag as accepted
         by :meth:`.FromClause.join` and other Core constructs.

         .. versionadded:: 1.0.0

        :param from_joinpoint=False: When using ``aliased=True``, a setting
         of True here will cause the join to be from the most recent
         joined target, rather than starting back from the original
         FROM clauses of the query.

        .. seealso::

            :ref:`ormtutorial_joins` in the ORM tutorial.

            :ref:`inheritance_toplevel` for details on how
            :meth:`~.Query.join` is used for inheritance relationships.

            :func:`.orm.join` - a standalone ORM-level join function,
            used internally by :meth:`.Query.join`, which in previous
            SQLAlchemy versions was the primary ORM-level joining interface.

        """
        # pull out keyword flags; anything left over is an error
        aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
            kwargs.pop('from_joinpoint', False),\
            kwargs.pop('isouter', False)
        if kwargs:
            raise TypeError("unknown arguments: %s" %
                            ', '.join(sorted(kwargs)))
        return self._join(props,
                          outerjoin=isouter, create_aliases=aliased,
                          from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
', '.join(sorted(kwargs)))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
    def _update_joinpoint(self, jp):
        """Install ``jp`` as the current join point and rebuild the
        spine of the ``_joinpath`` tree to include it.

        ``jp`` is a dict node; its optional ``'prev'`` entry is an
        ``(edge, parent_dict)`` tuple linking back toward the root.
        """
        self._joinpoint = jp
        # copy backwards to the root of the _joinpath
        # dict, so that no existing dict in the path is mutated
        while 'prev' in jp:
            f, prev = jp['prev']
            # copy the parent before inserting the (possibly new) child;
            # the original parent dict may be shared with other Query
            # objects produced by the generative chain.
            prev = prev.copy()
            prev[f] = jp
            jp['prev'] = (f, prev)
            jp = prev
        self._joinpath = jp
    @_generative(_no_statement_condition, _no_limit_offset)
    def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
        """consumes arguments from join() or outerjoin(), places them into a
        consistent format with which to form the actual JOIN constructs.

        :param keys: tuple of join targets as given to join()/outerjoin():
         relationship-bound attributes, string relationship names, target
         entities, or ``(target, onclause)`` tuples.
        :param outerjoin: if True, render LEFT OUTER JOIN.
        :param create_aliases: if True (``aliased=True``), anonymously
         alias each join target.
        :param from_joinpoint: if True, join from the current join point
         rather than resetting to the query's base FROM entities.
        """
        if not from_joinpoint:
            self._reset_joinpoint()

        if len(keys) == 2 and \
                isinstance(keys[0], (expression.FromClause,
                                     type, AliasedClass)) and \
                isinstance(keys[1], (str, expression.ClauseElement,
                                     interfaces.PropComparator)):
            # detect 2-arg form of join and
            # convert to a tuple.
            keys = (keys,)

        keylist = util.to_list(keys)
        for idx, arg1 in enumerate(keylist):
            if isinstance(arg1, tuple):
                # "tuple" form of join, multiple
                # tuples are accepted as well.   The simpler
                # "2-arg" form is preferred.  May deprecate
                # the "tuple" usage.
                arg1, arg2 = arg1
            else:
                arg2 = None

            # determine onclause/right_entity.  there
            # is a little bit of legacy behavior still at work here
            # which means they might be in either order.  may possibly
            # lock this down to (right_entity, onclause) in 0.6.
            if isinstance(
                    arg1, (interfaces.PropComparator, util.string_types)):
                right_entity, onclause = arg2, arg1
            else:
                right_entity, onclause = arg1, arg2

            left_entity = prop = None

            # a relationship attribute may carry an of_type() target
            if isinstance(onclause, interfaces.PropComparator):
                of_type = getattr(onclause, '_of_type', None)
            else:
                of_type = None

            if isinstance(onclause, util.string_types):
                # string name: resolve against the current join point
                left_entity = self._joinpoint_zero()

                descriptor = _entity_descriptor(left_entity, onclause)
                onclause = descriptor

            # check for q.join(Class.propname, from_joinpoint=True)
            # and Class is that of the current joinpoint
            elif from_joinpoint and \
                    isinstance(onclause, interfaces.PropComparator):
                left_entity = onclause._parententity

                info = inspect(self._joinpoint_zero())
                left_mapper, left_selectable, left_is_aliased = \
                    getattr(info, 'mapper', None), \
                    info.selectable, \
                    getattr(info, 'is_aliased_class', None)

                # re-resolve the descriptor against the join point's
                # (possibly aliased) entity so adaptation applies
                if left_mapper is left_entity:
                    left_entity = self._joinpoint_zero()
                    descriptor = _entity_descriptor(left_entity,
                                                    onclause.key)
                    onclause = descriptor

            if isinstance(onclause, interfaces.PropComparator):
                if right_entity is None:
                    if of_type:
                        right_entity = of_type
                    else:
                        right_entity = onclause.property.mapper

                left_entity = onclause._parententity

                prop = onclause.property
                if not isinstance(onclause, attributes.QueryableAttribute):
                    onclause = prop

                if not create_aliases:
                    # check for this path already present.
                    # don't render in that case.
                    edge = (left_entity, right_entity, prop.key)
                    if edge in self._joinpoint:
                        # The child's prev reference might be stale --
                        # it could point to a parent older than the
                        # current joinpoint.  If this is the case,
                        # then we need to update it and then fix the
                        # tree's spine with _update_joinpoint.  Copy
                        # and then mutate the child, which might be
                        # shared by a different query object.
                        jp = self._joinpoint[edge].copy()
                        jp['prev'] = (edge, self._joinpoint)
                        self._update_joinpoint(jp)

                        # only warn when the duplicate is the last target
                        if idx == len(keylist) - 1:
                            util.warn(
                                "Pathed join target %s has already "
                                "been joined to; skipping" % prop)
                        continue

            elif onclause is not None and right_entity is None:
                # TODO: no coverage here
                raise NotImplementedError("query.join(a==b) not supported.")

            self._join_left_to_right(
                left_entity,
                right_entity, onclause,
                outerjoin, create_aliases, prop)
    def _join_left_to_right(self, left, right,
                            onclause, outerjoin, create_aliases, prop):
        """append a JOIN to the query's from clause.

        Resolves the left side if not given, guards against degenerate
        self-joins, prepares/aliases the right side, records the join
        point, then renders the JOIN via _join_to_left().
        """

        self._polymorphic_adapters = self._polymorphic_adapters.copy()

        # no explicit left: fall back to the established FROM objects,
        # then to the query's primary entity
        if left is None:
            if self._from_obj:
                left = self._from_obj[0]
            elif self._entities:
                left = self._entities[0].entity_zero_or_selectable

        if left is None:
            raise sa_exc.InvalidRequestError(
                "Don't know how to join from %s; please use "
                "select_from() to establish the left "
                "entity/selectable of this join" % self._entities[0])

        if left is right and \
                not create_aliases:
            raise sa_exc.InvalidRequestError(
                "Can't construct a join from %s to %s, they "
                "are the same entity" %
                (left, right))

        l_info = inspect(left)
        r_info = inspect(right)

        overlap = False
        if not create_aliases:
            right_mapper = getattr(r_info, "mapper", None)
            # if the target is a joined inheritance mapping,
            # be more liberal about auto-aliasing.
            if right_mapper and (
                    right_mapper.with_polymorphic or
                    isinstance(right_mapper.mapped_table, expression.Join)
            ):
                for from_obj in self._from_obj or [l_info.selectable]:
                    if sql_util.selectables_overlap(
                            l_info.selectable, from_obj) and \
                            sql_util.selectables_overlap(
                                from_obj, r_info.selectable):
                        overlap = True
                        break

        if (overlap or not create_aliases) and \
                l_info.selectable is r_info.selectable:
            raise sa_exc.InvalidRequestError(
                "Can't join table/selectable '%s' to itself" %
                l_info.selectable)

        right, onclause = self._prepare_right_side(
            r_info, right, onclause,
            create_aliases,
            prop, overlap)

        # if joining on a MapperProperty path,
        # track the path to prevent redundant joins
        if not create_aliases and prop:
            self._update_joinpoint({
                '_joinpoint_entity': right,
                'prev': ((left, right, prop.key), self._joinpoint)
            })
        else:
            self._joinpoint = {'_joinpoint_entity': right}

        self._join_to_left(l_info, left, right, onclause, outerjoin)
    def _prepare_right_side(self, r_info, right, onclause, create_aliases,
                            prop, overlap):
        """Validate and, if necessary, alias the right side of a join.

        Returns the possibly-replaced ``(right, onclause)`` pair and
        installs filter/result-set adapters when an alias was generated.
        """
        info = r_info

        right_mapper, right_selectable, right_is_aliased = \
            getattr(info, 'mapper', None), \
            info.selectable, \
            getattr(info, 'is_aliased_class', False)

        if right_mapper:
            self._join_entities += (info, )

        # the join target must be compatible with the relationship's mapper
        if right_mapper and prop and \
                not right_mapper.common_parent(prop.mapper):
            raise sa_exc.InvalidRequestError(
                "Join target %s does not correspond to "
                "the right side of join condition %s" % (right, onclause)
            )

        if not right_mapper and prop:
            right_mapper = prop.mapper

        need_adapter = False

        # a plain selectable given as the target of a mapped join:
        # wrap it as an aliased() entity against the mapper
        if right_mapper and right is right_selectable:
            if not right_selectable.is_derived_from(
                    right_mapper.mapped_table):
                raise sa_exc.InvalidRequestError(
                    "Selectable '%s' is not derived from '%s'" %
                    (right_selectable.description,
                     right_mapper.mapped_table.description))

            if isinstance(right_selectable, expression.SelectBase):
                # TODO: this isn't even covered now!
                right_selectable = right_selectable.alias()
                need_adapter = True

            right = aliased(right_mapper, right_selectable)

        aliased_entity = right_mapper and \
            not right_is_aliased and \
            (
                right_mapper.with_polymorphic and isinstance(
                    right_mapper._with_polymorphic_selectable,
                    expression.Alias)
                or
                overlap  # test for overlap:
                # orm/inheritance/relationships.py
                # SelfReferentialM2MTest
            )

        if not need_adapter and (create_aliases or aliased_entity):
            right = aliased(right, flat=True)
            need_adapter = True

        # if an alias() of the right side was generated here,
        # apply an adapter to all subsequent filter() calls
        # until reset_joinpoint() is called.
        if need_adapter:
            self._filter_aliases = ORMAdapter(
                right,
                equivalents=right_mapper and
                right_mapper._equivalent_columns or {},
                chain_to=self._filter_aliases)

        # if the onclause is a ClauseElement, adapt it with any
        # adapters that are in place right now
        if isinstance(onclause, expression.ClauseElement):
            onclause = self._adapt_clause(onclause, True, True)

        # if an alias() on the right side was generated,
        # which is intended to wrap a the right side in a subquery,
        # ensure that columns retrieved from this target in the result
        # set are also adapted.
        if aliased_entity and not create_aliases:
            self._mapper_loads_polymorphically_with(
                right_mapper,
                ORMAdapter(
                    right,
                    equivalents=right_mapper._equivalent_columns
                )
            )

        return right, onclause
    def _join_to_left(self, l_info, left, right, onclause, outerjoin):
        """Render the JOIN construct into this query's FROM list.

        If an existing FROM object contains the left side, it is
        replaced in-place by the joined construct; otherwise a new
        joined FROM element is appended.
        """
        info = l_info
        left_mapper = getattr(info, 'mapper', None)
        left_selectable = info.selectable

        if self._from_obj:
            replace_clause_index, clause = sql_util.find_join_source(
                self._from_obj,
                left_selectable)
            if clause is not None:
                try:
                    clause = orm_join(clause,
                                      right,
                                      onclause, isouter=outerjoin)
                except sa_exc.ArgumentError as ae:
                    raise sa_exc.InvalidRequestError(
                        "Could not find a FROM clause to join from. "
                        "Tried joining to %s, but got: %s" % (right, ae))

                # splice the joined clause back in at the same position
                self._from_obj = \
                    self._from_obj[:replace_clause_index] + \
                    (clause, ) + \
                    self._from_obj[replace_clause_index + 1:]
                return

        if left_mapper:
            # prefer the selectable of the entity that corresponds to
            # the left side, falling back to the left entity itself
            for ent in self._entities:
                if ent.corresponds_to(left):
                    clause = ent.selectable
                    break
            else:
                clause = left
        else:
            clause = left_selectable

        assert clause is not None
        try:
            clause = orm_join(clause, right, onclause, isouter=outerjoin)
        except sa_exc.ArgumentError as ae:
            raise sa_exc.InvalidRequestError(
                "Could not find a FROM clause to join from. "
                "Tried joining to %s, but got: %s" % (right, ae))
        self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
    def __getitem__(self, item):
        """Index and slice access, e.g. ``query[1:3]`` or ``query[5]``.

        Slices are translated to LIMIT/OFFSET via :meth:`.slice` where
        possible; negative indexes force a full fetch.
        """
        if isinstance(item, slice):
            start, stop, step = util.decode_slice(item)

            # an empty slice (stop <= start) yields [] without a query
            if isinstance(stop, int) and \
                    isinstance(start, int) and \
                    stop - start <= 0:
                return []

            # perhaps we should execute a count() here so that we
            # can still use LIMIT/OFFSET ?
            elif (isinstance(start, int) and start < 0) \
                    or (isinstance(stop, int) and stop < 0):
                return list(self)[item]

            res = self.slice(start, stop)
            if step is not None:
                # LIMIT/OFFSET covers start/stop; apply step in Python
                return list(res)[None:None:item.step]
            else:
                return list(res)
        else:
            if item == -1:
                # negative single index requires a full fetch
                return list(self)[-1]
            else:
                # single positive index: fetch a one-row slice
                return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
.. note::
The :meth:`.distinct` call includes logic that will automatically
add columns from the ORDER BY of the query to the columns
clause of the SELECT statement, to satisfy the common need
of the database backend that ORDER BY columns be part of the
SELECT list when DISTINCT is used. These columns *are not*
added to the list of columns actually fetched by the
:class:`.Query`, however, so would not affect results.
The columns are passed through when using the
:attr:`.Query.statement` accessor, however.
:param \*expr: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>>)``
construct.
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
.. seealso::
:meth:`.HasPrefixes.prefix_with`
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
@_generative()
def suffix_with(self, *suffixes):
"""Apply the suffix to the query and return the newly resulting
``Query``.
:param \*suffixes: optional suffixes, typically strings,
not using any commas.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.prefix_with`
:meth:`.HasSuffixes.suffix_with`
"""
if self._suffixes:
self._suffixes += suffixes
else:
self._suffixes = suffixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling :meth:`.Query.first` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one_or_none(self):
"""Return at most one result or raise an exception.
Returns ``None`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`.Query.one_or_none` results in an execution of the underlying
query.
.. versionadded:: 1.0.9
Added :meth:`.Query.one_or_none`
.. seealso::
:meth:`.Query.first`
:meth:`.Query.one`
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()")
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`.one` results in an execution of the underlying query.
.. seealso::
:meth:`.Query.first`
:meth:`.Query.one_or_none`
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._bind_mapper(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(querycontext.query, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
'entity': User
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
'entity': User
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias,
'entity': user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(insp_ent, 'is_aliased_class', False),
'expr': ent.expr,
'entity':
getattr(insp_ent, "entity", None)
if ent.entity_zero is not None
and not insp_ent.is_clause_element
else None
}
for ent, insp_ent in [
(
_ent,
(inspect(_ent.entity_zero)
if _ent.entity_zero is not None else None)
)
for _ent in self._entities
]
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'suffixes': self._suffixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
delete(synchronize_session=False)
sess.query(User).filter(User.age == 25).\\
delete(synchronize_session='evaluate')
.. warning:: The :meth:`.Query.delete` method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query deletes**
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON DELETE CASCADE/SET
NULL/etc. is configured for any foreign key references
which require it, otherwise the database may emit an
integrity violation if foreign key references are being
enforced.
After the DELETE, dependent objects in the
:class:`.Session` which were impacted by an ON DELETE
may not contain the current state, or may have been
deleted. This issue is resolved once the
:class:`.Session` is expired, which normally occurs upon
:meth:`.Session.commit` or can be forced by using
:meth:`.Session.expire_all`. Accessing an expired
object whose row has been deleted will invoke a SELECT
to locate the row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
raised.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
via a proceeding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events **are not invoked** from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to
act upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate', update_args=None):
    """Perform a bulk update query.

    Updates rows matched by this query in the database.

    E.g.::

        sess.query(User).filter(User.age == 25).\\
            update({User.age: User.age - 10}, synchronize_session=False)

        sess.query(User).filter(User.age == 25).\\
            update({"age": User.age - 10}, synchronize_session='evaluate')

    .. warning:: The :meth:`.Query.update` method is a "bulk" operation,
       which bypasses ORM unit-of-work automation in favor of greater
       performance.  **Please read all caveats and warnings below.**

    :param values: a dictionary with attributes names, or alternatively
     mapped attributes or SQL expressions, as keys, and literal
     values or sql expressions as values.  If :ref:`parameter-ordered
     mode <updates_order_parameters>` is desired, the values can be
     passed as a list of 2-tuples; this requires that the
     :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
     flag is passed to the :paramref:`.Query.update.update_args` dictionary
     as well.

     .. versionchanged:: 1.0.0 - string names in the values dictionary
        are now resolved against the mapped entity; previously, these
        strings were passed as literal column names with no mapper-level
        translation.

    :param synchronize_session: chooses the strategy to update the
     attributes on objects in the session.  Valid values are:

        ``False`` - don't synchronize the session. This option is the most
        efficient and is reliable once the session is expired, which
        typically occurs after a commit(), or explicitly using
        expire_all().  Before the expiration, updated objects may still
        remain in the session with stale values on their attributes, which
        can lead to confusing results.

        ``'fetch'`` - performs a select query before the update to find
        objects that are matched by the update query.  The updated
        attributes are expired on matched objects.

        ``'evaluate'`` - Evaluate the Query's criteria in Python straight
        on the objects in the session.  If evaluation of the criteria isn't
        implemented, an exception is raised.

        The expression evaluator currently doesn't account for differing
        string collations between the database and Python.

    :param update_args: Optional dictionary, if present will be passed
     to the underlying :func:`.update` construct as the ``**kw`` for
     the object.  May be used to pass dialect-specific arguments such
     as ``mysql_limit``, as well as other special arguments such as
     :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.

     .. versionadded:: 1.0.0

    :return: the count of rows matched as returned by the database's
     "row count" feature.

    .. warning:: **Additional Caveats for bulk query updates**

        * The method does **not** offer in-Python cascading of
          relationships - it is assumed that ON UPDATE CASCADE is
          configured for any foreign key references which require
          it, otherwise the database may emit an integrity
          violation if foreign key references are being enforced.

          After the UPDATE, dependent objects in the
          :class:`.Session` which were impacted by an ON UPDATE
          CASCADE may not contain the current state; this issue is
          resolved once the :class:`.Session` is expired, which
          normally occurs upon :meth:`.Session.commit` or can be
          forced by using :meth:`.Session.expire_all`.

        * The ``'fetch'`` strategy results in an additional
          SELECT statement emitted and will significantly reduce
          performance.

        * The ``'evaluate'`` strategy performs a scan of
          all matching objects within the :class:`.Session`; if the
          contents of the :class:`.Session` are expired, such as
          via a proceeding :meth:`.Session.commit` call, **this will
          result in SELECT queries emitted for every matching object**.

        * The method supports multiple table updates, as detailed
          in :ref:`multi_table_updates`, and this behavior does
          extend to support updates of joined-inheritance and
          other multiple table mappings.  However, the **join
          condition of an inheritance mapper is not
          automatically rendered**.  Care must be taken in any
          multiple-table update to explicitly include the joining
          condition between those tables, even in mappings where
          this is normally automatic. E.g. if a class ``Engineer``
          subclasses ``Employee``, an UPDATE of the ``Engineer``
          local table using criteria against the ``Employee``
          local table might look like::

                session.query(Engineer).\\
                    filter(Engineer.id == Employee.id).\\
                    filter(Employee.name == 'dilbert').\\
                    update({"engineer_type": "programmer"})

        * The :meth:`.MapperEvents.before_update` and
          :meth:`.MapperEvents.after_update`
          events **are not invoked from this method**.  Instead, the
          :meth:`.SessionEvents.after_bulk_update` method is provided to
          act upon a mass UPDATE of entity rows.

    .. seealso::

        :meth:`.Query.delete`

        :ref:`inserts_and_updates` - Core SQL tutorial

    """

    # Normalize the optional dict here; the parameter defaults to None
    # rather than {} so a mutable default is never shared across calls.
    update_args = update_args or {}
    # The actual UPDATE construction/execution/session-synchronization
    # is delegated to the persistence module's BulkUpdate strategy,
    # chosen by the synchronize_session value.
    update_op = persistence.BulkUpdate.factory(
        self, synchronize_session, values, update_args)
    update_op.exec_()
    return update_op.rowcount
def _compile_context(self, labels=True):
    """Assemble the :class:`.QueryContext` carrying the Core SELECT
    statement for this Query.

    :param labels: when True, apply "tablename_columnname" style labels
     to the selected columns.
    :return: a populated :class:`.QueryContext`; its ``.statement``
     attribute is the statement to execute.
    """
    # before_compile event hooks may return a replacement Query;
    # if so, compile that one instead.
    if self.dispatch.before_compile:
        for fn in self.dispatch.before_compile:
            new_query = fn(self)
            if new_query is not None:
                self = new_query

    context = QueryContext(self)

    # a Query built from an explicit statement (from_statement())
    # short-circuits compilation entirely.
    if context.statement is not None:
        return context

    context.labels = labels

    context._for_update_arg = self._for_update_arg

    # each entity contributes its columns and FROM elements.
    for entity in self._entities:
        entity.setup_context(self, context)

    # eager-join records queued by loader strategies are applied now.
    for rec in context.create_eager_joins:
        strategy = rec[0]
        strategy(*rec[1:])

    if context.from_clause:
        # "load from explicit FROMs" mode,
        # i.e. when select_from() or join() is used
        context.froms = list(context.from_clause)
    # else "load from discrete FROMs" mode,
    # i.e. when each _MappedEntity has its own FROM

    if self._enable_single_crit:
        self._adjust_for_single_inheritance(context)

    if not context.primary_columns:
        if self._only_load_props:
            raise sa_exc.InvalidRequestError(
                "No column-based properties specified for "
                "refresh operation. Use session.expire() "
                "to reload collections and related items.")
        else:
            raise sa_exc.InvalidRequestError(
                "Query contains no columns with which to "
                "SELECT from.")

    # eager joins combined with LIMIT/OFFSET/DISTINCT require wrapping
    # the primary SELECT in a subquery; otherwise a plain SELECT works.
    if context.multi_row_eager_loaders and self._should_nest_selectable:
        context.statement = self._compound_eager_statement(context)
    else:
        context.statement = self._simple_statement(context)
    return context
def _compound_eager_statement(self, context):
    # for eager joins present and LIMIT/OFFSET/DISTINCT,
    # wrap the query inside a select,
    # then append eager joins onto that

    if context.order_by:
        # the ORDER BY columns must also appear in the inner SELECT so
        # the outer wrapper can re-apply the ordering against the alias.
        order_by_col_expr = list(
            chain(*[
                sql_util.unwrap_order_by(o)
                for o in context.order_by
            ])
        )
    else:
        context.order_by = None
        order_by_col_expr = []

    inner = sql.select(
        context.primary_columns + order_by_col_expr,
        context.whereclause,
        from_obj=context.froms,
        use_labels=context.labels,
        # TODO: this order_by is only needed if
        # LIMIT/OFFSET is present in self._select_args,
        # else the application on the outside is enough
        order_by=context.order_by,
        **self._select_args
    )

    # statement hints and correlation apply to the inner SELECT,
    # which is then aliased for the outer wrapper.
    for hint in self._with_hints:
        inner = inner.with_hint(*hint)

    if self._correlate:
        inner = inner.correlate(*self._correlate)

    inner = inner.alias()

    # adapt all column references in the outer statement to the alias.
    equivs = self.__all_equivs()

    context.adapter = sql_util.ColumnAdapter(inner, equivs)

    statement = sql.select(
        [inner] + context.secondary_columns,
        use_labels=context.labels)

    statement._for_update_arg = context._for_update_arg

    from_clause = inner
    for eager_join in context.eager_joins.values():
        # EagerLoader places a 'stop_on' attribute on the join,
        # giving us a marker as to where the "splice point" of
        # the join should be
        from_clause = sql_util.splice_joins(
            from_clause,
            eager_join, eager_join.stop_on)

    statement.append_from(from_clause)

    if context.order_by:
        statement.append_order_by(
            *context.adapter.copy_and_process(
                context.order_by
            )
        )

    statement.append_order_by(*context.eager_order_by)
    return statement
def _simple_statement(self, context):
    """Build the single, non-nested SELECT for this Query."""
    if not context.order_by:
        # normalize falsy order_by values (e.g. empty tuple / False)
        # to None for sql.select().
        context.order_by = None

    if self._distinct and context.order_by:
        # DISTINCT requires the ORDER BY columns to be present in the
        # columns clause, per SQL semantics.
        order_by_col_expr = list(
            chain(*[
                sql_util.unwrap_order_by(o)
                for o in context.order_by
            ])
        )
        context.primary_columns += order_by_col_expr

    # eager joins are folded directly into the FROM list here,
    # since no subquery wrapping is needed.
    context.froms += tuple(context.eager_joins.values())

    statement = sql.select(
        context.primary_columns +
        context.secondary_columns,
        context.whereclause,
        from_obj=context.froms,
        use_labels=context.labels,
        order_by=context.order_by,
        **self._select_args
    )
    statement._for_update_arg = context._for_update_arg

    for hint in self._with_hints:
        statement = statement.with_hint(*hint)

    if self._correlate:
        statement = statement.correlate(*self._correlate)

    if context.eager_order_by:
        statement.append_order_by(*context.eager_order_by)
    return statement
def _adjust_for_single_inheritance(self, context):
    """Apply single-table-inheritance filtering.

    For all distinct single-table-inheritance mappers represented in
    the columns clause of this query, add criterion to the WHERE
    clause of the given QueryContext such that only the appropriate
    subtypes are selected from the total results.
    """
    for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
        # entities that appear as join targets already carry their
        # criterion; skip them here.
        if ext_info in self._join_entities:
            continue
        single_crit = ext_info.mapper._single_table_criterion
        if single_crit is not None:
            if adapter:
                single_crit = adapter.traverse(single_crit)
            single_crit = self._adapt_clause(single_crit, False, False)
            # AND the discriminator criterion onto any existing WHERE;
            # True_._ifnone() supplies a no-op when there is none yet.
            context.whereclause = sql.and_(
                sql.True_._ifnone(context.whereclause),
                single_crit)
def __str__(self):
    """Render this Query's SELECT statement as a SQL string."""
    compiled = self._compile_context()
    return str(compiled.statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
    """Adapter translating the legacy ``Query.with_lockmode()`` string
    argument into a :class:`.ForUpdateArg`."""

    @classmethod
    def parse_legacy_query(cls, mode):
        """Return a :class:`.LockmodeArg` for the legacy *mode* string.

        :param mode: one of ``"read"``, ``"update"``, ``"update_nowait"``,
         or ``None``/``False`` for "no locking".
        :return: a new :class:`.LockmodeArg`, or ``None`` when *mode*
         indicates no lock.
        :raises sa_exc.ArgumentError: for any unrecognized mode string.
        """
        # None / False mean "no FOR UPDATE clause at all".
        if mode in (None, False):
            return None

        if mode == "read":
            read = True
            nowait = False
        elif mode == "update":
            read = nowait = False
        elif mode == "update_nowait":
            nowait = True
            read = False
        else:
            raise sa_exc.ArgumentError(
                "Unknown with_lockmode argument: %r" % mode)

        # classmethod-style alternate constructor: build via cls so the
        # first parameter is idiomatically named and subclasses work.
        return cls(read=read, nowait=nowait)
class _QueryEntity(object):
    """represent an entity column returned within a Query result."""

    def __new__(cls, *args, **kwargs):
        # Factory dispatch: when constructed via the base class, pick the
        # concrete subclass from the kind of entity the caller supplied.
        if cls is _QueryEntity:
            entity = args[1]
            entity_is_string = isinstance(entity, util.string_types)
            if not entity_is_string and _is_mapped_class(entity):
                cls = _MapperEntity
            elif isinstance(entity, Bundle):
                cls = _BundleEntity
            else:
                cls = _ColumnEntity
        return object.__new__(cls)

    def _clone(self):
        """Return a shallow copy of this entity."""
        copied = self.__class__.__new__(self.__class__)
        copied.__dict__ = self.__dict__.copy()
        return copied
class _MapperEntity(_QueryEntity):
    """mapper/class/AliasedClass entity"""

    def __init__(self, query, entity):
        # the first mapper entity registered becomes the query's
        # "primary" entity, which drives refresh/only-load behavior.
        if not query._primary_entity:
            query._primary_entity = self
        query._entities.append(self)
        self.entities = [entity]
        self.expr = entity

    supports_single_entity = True

    def setup_entity(self, ext_info, aliased_adapter):
        # ext_info is the inspection of the mapped class / AliasedClass;
        # cache the pieces used during compilation and row processing.
        self.mapper = ext_info.mapper
        self.aliased_adapter = aliased_adapter
        self.selectable = ext_info.selectable
        self.is_aliased_class = ext_info.is_aliased_class
        self._with_polymorphic = ext_info.with_polymorphic_mappers
        self._polymorphic_discriminator = \
            ext_info.polymorphic_on
        self.entity_zero = ext_info
        if ext_info.is_aliased_class:
            self._label_name = self.entity_zero.name
        else:
            self._label_name = self.mapper.class_.__name__
        self.path = self.entity_zero._path_registry

    def set_with_polymorphic(self, query, cls_or_mappers,
                             selectable, polymorphic_on):
        """Receive an update from a call to query.with_polymorphic().

        Note the newer style of using a free standing with_polymporphic()
        construct doesn't make use of this method.
        """
        if self.is_aliased_class:
            # TODO: invalidrequest ?
            raise NotImplementedError(
                "Can't use with_polymorphic() against "
                "an Aliased object"
            )

        if cls_or_mappers is None:
            query._reset_polymorphic_adapter(self.mapper)
            return

        mappers, from_obj = self.mapper._with_polymorphic_args(
            cls_or_mappers, selectable)
        self._with_polymorphic = mappers
        self._polymorphic_discriminator = polymorphic_on

        self.selectable = from_obj
        query._mapper_loads_polymorphically_with(
            self.mapper, sql_util.ColumnAdapter(
                from_obj, self.mapper._equivalent_columns))

    # identity is a sufficient uniquing function for full entities.
    filter_fn = id

    @property
    def type(self):
        return self.mapper.class_

    @property
    def entity_zero_or_selectable(self):
        return self.entity_zero

    def corresponds_to(self, entity):
        # aliased entities match only the exact same base alias;
        # plain entities match across the inheritance hierarchy.
        if entity.is_aliased_class:
            if self.is_aliased_class:
                if entity._base_alias is self.entity_zero._base_alias:
                    return True
            return False
        elif self.is_aliased_class:
            if self.entity_zero._use_mapper_path:
                return entity in self._with_polymorphic
            else:
                return entity is self.entity_zero

        return entity.common_parent(self.entity_zero)

    def adapt_to_selectable(self, query, sel):
        query._entities.append(self)

    def _get_entity_clauses(self, query, context):
        """Return the column adapter in effect for this entity, if any."""
        adapter = None

        if not self.is_aliased_class:
            if query._polymorphic_adapters:
                adapter = query._polymorphic_adapters.get(self.mapper, None)
        else:
            adapter = self.aliased_adapter

        if adapter:
            if query._from_obj_alias:
                ret = adapter.wrap(query._from_obj_alias)
            else:
                ret = adapter
        else:
            ret = query._from_obj_alias

        return ret

    def row_processor(self, query, context, result):
        adapter = self._get_entity_clauses(query, context)

        if context.adapter and adapter:
            adapter = adapter.wrap(context.adapter)
        elif not adapter:
            adapter = context.adapter

        # polymorphic mappers which have concrete tables in
        # their hierarchy usually
        # require row aliasing unconditionally.
        if not adapter and self.mapper._requires_row_aliasing:
            adapter = sql_util.ColumnAdapter(
                self.selectable,
                self.mapper._equivalent_columns)

        if query._primary_entity is self:
            only_load_props = query._only_load_props
            refresh_state = context.refresh_state
        else:
            only_load_props = refresh_state = None

        _instance = loading._instance_processor(
            self.mapper,
            context,
            result,
            self.path,
            adapter,
            only_load_props=only_load_props,
            refresh_state=refresh_state,
            polymorphic_discriminator=self._polymorphic_discriminator
        )

        return _instance, self._label_name

    def setup_context(self, query, context):
        adapter = self._get_entity_clauses(query, context)

        # if self._adapted_selectable is None:
        context.froms += (self.selectable,)

        if context.order_by is False and self.mapper.order_by:
            context.order_by = self.mapper.order_by

            # apply adaptation to the mapper's order_by if needed.
            if adapter:
                context.order_by = adapter.adapt_list(
                    util.to_list(
                        context.order_by
                    )
                )

        loading._setup_entity_query(
            context, self.mapper, self,
            self.path, adapter, context.primary_columns,
            with_polymorphic=self._with_polymorphic,
            only_load_props=query._only_load_props,
            polymorphic_discriminator=self._polymorphic_discriminator)

    def __str__(self):
        return str(self.mapper)
@inspection._self_inspects
class Bundle(InspectionAttr):
    """A grouping of SQL expressions that are returned by a :class:`.Query`
    under one namespace.

    The :class:`.Bundle` essentially allows nesting of the tuple-based
    results returned by a column-oriented :class:`.Query` object.  It also
    is extensible via simple subclassing, where the primary capability
    to override is that of how the set of expressions should be returned,
    allowing post-processing as well as custom return types, without
    involving ORM identity-mapped classes.

    .. versionadded:: 0.9.0

    .. seealso::

        :ref:`bundles`

    """

    single_entity = False
    """If True, queries for a single Bundle will be returned as a single
    entity, rather than an element within a keyed tuple."""

    is_clause_element = False

    is_mapper = False

    is_aliased_class = False

    def __init__(self, name, *exprs, **kw):
        """Construct a new :class:`.Bundle`.

        e.g.::

            bn = Bundle("mybundle", MyClass.x, MyClass.y)

            for row in session.query(bn).filter(
                    bn.c.x == 5).filter(bn.c.y == 4):
                print(row.mybundle.x, row.mybundle.y)

        :param name: name of the bundle.
        :param \*exprs: columns or SQL expressions comprising the bundle.
        :param single_entity=False: if True, rows for this :class:`.Bundle`
         can be returned as a "single entity" outside of any enclosing tuple
         in the same manner as a mapped entity.

        """
        self.name = self._label = name
        self.exprs = exprs
        # instance-level 'c'/'columns' shadow the class-level None
        # placeholders documented below.
        self.c = self.columns = ColumnCollection()
        self.columns.update((getattr(col, "key", col._label), col)
                            for col in exprs)
        self.single_entity = kw.pop('single_entity', self.single_entity)

    columns = None
    """A namespace of SQL expressions referred to by this :class:`.Bundle`.

        e.g.::

            bn = Bundle("mybundle", MyClass.x, MyClass.y)

            q = sess.query(bn).filter(bn.c.x == 5)

        Nesting of bundles is also supported::

            b1 = Bundle("b1",
                    Bundle('b2', MyClass.a, MyClass.b),
                    Bundle('b3', MyClass.x, MyClass.y)
                )

            q = sess.query(b1).filter(
                b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)

    .. seealso::

        :attr:`.Bundle.c`

    """

    c = None
    """An alias for :attr:`.Bundle.columns`."""

    def _clone(self):
        # shallow copy; the expressions and column namespace are shared.
        cloned = self.__class__.__new__(self.__class__)
        cloned.__dict__.update(self.__dict__)
        return cloned

    def __clause_element__(self):
        return expression.ClauseList(group=False, *self.c)

    @property
    def clauses(self):
        return self.__clause_element__().clauses

    def label(self, name):
        """Provide a copy of this :class:`.Bundle` passing a new label."""
        cloned = self._clone()
        cloned.name = name
        return cloned

    def create_row_processor(self, query, procs, labels):
        """Produce the "row processing" function for this :class:`.Bundle`.

        May be overridden by subclasses.

        .. seealso::

            :ref:`bundles` - includes an example of subclassing.

        """
        keyed_tuple = util.lightweight_named_tuple('result', labels)

        def proc(row):
            # note: the comprehension's 'proc' iterates the captured
            # per-element processors; it does not refer to this function.
            return keyed_tuple([proc(row) for proc in procs])
        return proc
class _BundleEntity(_QueryEntity):
    """Query entity wrapping a :class:`.Bundle` of expressions."""

    def __init__(self, query, bundle, setup_entities=True):
        # note: for nested bundles, 'query' is actually the parent
        # _BundleEntity, whose _entities list we append ourselves to.
        query._entities.append(self)
        self.bundle = self.expr = bundle
        self.type = type(bundle)
        self._label_name = bundle.name
        self._entities = []

        if setup_entities:
            for expr in bundle.exprs:
                if isinstance(expr, Bundle):
                    _BundleEntity(self, expr)
                else:
                    _ColumnEntity(self, expr, namespace=self)

        self.entities = ()

        self.filter_fn = lambda item: item

        self.supports_single_entity = self.bundle.single_entity

    @property
    def entity_zero(self):
        # first child entity that resolves to a mapped entity, if any.
        for ent in self._entities:
            ezero = ent.entity_zero
            if ezero is not None:
                return ezero
        else:
            return None

    def corresponds_to(self, entity):
        # TODO: this seems to have no effect for
        # _ColumnEntity either
        return False

    @property
    def entity_zero_or_selectable(self):
        for ent in self._entities:
            ezero = ent.entity_zero_or_selectable
            if ezero is not None:
                return ezero
        else:
            return None

    def adapt_to_selectable(self, query, sel):
        c = _BundleEntity(query, self.bundle, setup_entities=False)
        # c._label_name = self._label_name
        # c.entity_zero = self.entity_zero
        # c.entities = self.entities

        for ent in self._entities:
            ent.adapt_to_selectable(c, sel)

    def setup_entity(self, ext_info, aliased_adapter):
        for ent in self._entities:
            ent.setup_entity(ext_info, aliased_adapter)

    def setup_context(self, query, context):
        for ent in self._entities:
            ent.setup_context(query, context)

    def row_processor(self, query, context, result):
        # gather the per-element processors, then hand them to the
        # Bundle so it can assemble (or post-process) the result rows.
        procs, labels = zip(
            *[ent.row_processor(query, context, result)
              for ent in self._entities]
        )

        proc = self.bundle.create_row_processor(query, procs, labels)

        return proc, self._label_name
class _ColumnEntity(_QueryEntity):
    """Column/expression based entity."""

    def __init__(self, query, column, namespace=None):
        self.expr = column
        self.namespace = namespace
        search_entities = True
        check_column = False

        if isinstance(column, util.string_types):
            # plain string -> literal_column()
            column = sql.literal_column(column)
            self._label_name = column.name
            search_entities = False
            check_column = True
            _entity = None
        elif isinstance(column, (
                attributes.QueryableAttribute,
                interfaces.PropComparator
        )):
            # ORM-instrumented attribute; unwrap to its Core element.
            _entity = getattr(column, '_parententity', None)
            if _entity is not None:
                search_entities = False
            self._label_name = column.key
            column = column._query_clause_element()
            check_column = True
            if isinstance(column, Bundle):
                _BundleEntity(query, column)
                return

        if not isinstance(column, sql.ColumnElement):
            if hasattr(column, '_select_iterable'):
                # break out an object like Table into
                # individual columns
                for c in column._select_iterable:
                    if c is column:
                        break
                    _ColumnEntity(query, c, namespace=column)
                else:
                    # for/else: loop completed without break, meaning the
                    # object expanded into columns; nothing more to do here.
                    return

            raise sa_exc.InvalidRequestError(
                "SQL expression, column, or mapped entity "
                "expected - got '%r'" % (column, )
            )
        elif not check_column:
            self._label_name = getattr(column, 'key', None)
            search_entities = True

        self.type = type_ = column.type
        if type_.hashable:
            self.filter_fn = lambda item: item
        else:
            # unhashable types can't be uniqued; substitute a counter so
            # every row is treated as distinct.
            counter = util.counter()
            self.filter_fn = lambda item: counter()

        # If the Column is unnamed, give it a
        # label() so that mutable column expressions
        # can be located in the result even
        # if the expression's identity has been changed
        # due to adaption.

        if not column._label and not getattr(column, 'is_literal', False):
            column = column.label(self._label_name)

        query._entities.append(self)

        self.column = column
        self.froms = set()

        # look for ORM entities represented within the
        # given expression.  Try to count only entities
        # for columns whose FROM object is in the actual list
        # of FROMs for the overall expression - this helps
        # subqueries which were built from ORM constructs from
        # leaking out their entities into the main select construct
        self.actual_froms = actual_froms = set(column._from_objects)

        if not search_entities:
            self.entity_zero = _entity
            if _entity:
                self.entities = [_entity]
            else:
                self.entities = []
            self._from_entities = set(self.entities)
        else:
            all_elements = [
                elem for elem in visitors.iterate(column, {})
                if 'parententity' in elem._annotations
            ]

            self.entities = util.unique_list([
                elem._annotations['parententity']
                for elem in all_elements
                if 'parententity' in elem._annotations
            ])

            self._from_entities = set([
                elem._annotations['parententity']
                for elem in all_elements
                if 'parententity' in elem._annotations
                and actual_froms.intersection(elem._from_objects)
            ])
            if self.entities:
                self.entity_zero = self.entities[0]
            elif self.namespace is not None:
                self.entity_zero = self.namespace
            else:
                self.entity_zero = None

    supports_single_entity = False

    @property
    def entity_zero_or_selectable(self):
        if self.entity_zero is not None:
            return self.entity_zero
        elif self.actual_froms:
            return list(self.actual_froms)[0]
        else:
            return None

    def adapt_to_selectable(self, query, sel):
        c = _ColumnEntity(query, sel.corresponding_column(self.column))
        c._label_name = self._label_name
        c.entity_zero = self.entity_zero
        c.entities = self.entities

    def setup_entity(self, ext_info, aliased_adapter):
        if 'selectable' not in self.__dict__:
            self.selectable = ext_info.selectable

        if self.actual_froms.intersection(ext_info.selectable._from_objects):
            self.froms.add(ext_info.selectable)

    def corresponds_to(self, entity):
        # TODO: just returning False here,
        # no tests fail
        if self.entity_zero is None:
            return False
        elif _is_aliased_class(entity):
            # TODO: polymorphic subclasses ?
            return entity is self.entity_zero
        else:
            return not _is_aliased_class(self.entity_zero) and \
                entity.common_parent(self.entity_zero)

    def row_processor(self, query, context, result):
        # reuse the column computed during setup_context() when present.
        if ('fetch_column', self) in context.attributes:
            column = context.attributes[('fetch_column', self)]
        else:
            column = query._adapt_clause(self.column, False, True)

        if context.adapter:
            column = context.adapter.columns[column]

        getter = result._getter(column)
        return getter, self._label_name

    def setup_context(self, query, context):
        column = query._adapt_clause(self.column, False, True)
        context.froms += tuple(self.froms)
        context.primary_columns.append(column)

        # stash the adapted column for row_processor().
        context.attributes[('fetch_column', self)] = column

    def __str__(self):
        return str(self.column)
class QueryContext(object):
    """Mutable state bag used while compiling and executing a Query."""

    __slots__ = (
        'multi_row_eager_loaders', 'adapter', 'froms', 'for_update',
        'query', 'session', 'autoflush', 'populate_existing',
        'invoke_all_eagers', 'version_check', 'refresh_state',
        'primary_columns', 'secondary_columns', 'eager_order_by',
        'eager_joins', 'create_eager_joins', 'propagate_options',
        'attributes', 'statement', 'from_clause', 'whereclause',
        'order_by', 'labels', '_for_update_arg', 'runid', 'partials'
    )

    def __init__(self, query):
        # A Query built via from_statement() carries its own statement;
        # apply labels to a plain SELECT that doesn't use them yet.
        if query._statement is not None:
            if isinstance(query._statement, expression.SelectBase) and \
                    not query._statement._textual and \
                    not query._statement.use_labels:
                self.statement = query._statement.apply_labels()
            else:
                self.statement = query._statement
        else:
            # statement will be produced by _compile_context(); start from
            # the Query's accumulated criteria.
            self.statement = None
            self.from_clause = query._from_obj
            self.whereclause = query._criterion
            self.order_by = query._order_by

        self.multi_row_eager_loaders = False
        self.adapter = None
        self.froms = ()
        self.for_update = None
        self.query = query
        self.session = query.session
        self.autoflush = query._autoflush
        self.populate_existing = query._populate_existing
        self.invoke_all_eagers = query._invoke_all_eagers
        self.version_check = query._version_check
        self.refresh_state = query._refresh_state
        self.primary_columns = []
        self.secondary_columns = []
        self.eager_order_by = []
        self.eager_joins = {}
        self.create_eager_joins = []
        # only loader options flagged propagate_to_loaders travel into
        # lazy loads for related objects.
        self.propagate_options = set(o for o in query._with_options if
                                     o.propagate_to_loaders)
        self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
    """MapperOption indicating the Query's main table has been aliased."""

    def __init__(self, alias):
        """Return a :class:`.MapperOption` that will indicate to the
        :class:`.Query` that the main table has been aliased.

        This is a seldom-used option to suit the
        very rare case that :func:`.contains_eager`
        is being used in conjunction with a user-defined SELECT
        statement that aliases the parent table.  E.g.::

            # define an aliased UNION called 'ulist'
            ulist = users.select(users.c.user_id==7).\\
                union(users.select(users.c.user_id>7)).\\
                alias('ulist')

            # add on an eager load of "addresses"
            statement = ulist.outerjoin(addresses).\\
                select().apply_labels()

            # create query, indicating "ulist" will be an
            # alias for the main table, "addresses"
            # property should be eager loaded
            query = session.query(User).options(
                contains_alias(ulist),
                contains_eager(User.addresses))

            # then get results via the statement
            results = query.from_statement(statement).all()

        :param alias: is the string name of an alias, or a
         :class:`~.sql.expression.Alias` object representing
         the alias.

        """
        self.alias = alias

    def process_query(self, query):
        # a string alias name is resolved against the primary mapper's
        # mapped table; an Alias object is used directly.
        if isinstance(self.alias, util.string_types):
            alias = query._mapper_zero().mapped_table.alias(self.alias)
        else:
            alias = self.alias
        query._from_obj_alias = sql_util.ColumnAdapter(alias)
| apache-2.0 |
Sorsly/subtle | google-cloud-sdk/lib/third_party/argcomplete/my_shlex.py | 35 | 11545 | # -*- coding: utf-8 -*-
# This copy of shlex.py is distributed with argcomplete.
# It incorporates changes proposed in http://bugs.python.org/issue1521950 and changes to allow it to match Unicode
# word characters.
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# changes to tokenize more like Posix shells by Vinay Sajip, January 2012.
import os.path, sys, re
from collections import deque
# Note: cStringIO is not compatible with Unicode
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
basestring
except NameError:
basestring = str
__all__ = ["shlex", "split"]
class UnicodeWordchars:
    ''' A replacement for shlex.wordchars that also matches (__contains__) any Unicode wordchars.
    '''
    def __init__(self, wordchars):
        """:param wordchars: explicit (typically ASCII) word characters to
        accept in addition to anything matching the Unicode ``\\w`` class."""
        self.wordchars = wordchars
        # Raw string: a plain '\w' literal is an invalid escape sequence
        # and raises a warning (eventually an error) on modern Pythons.
        self.uw_regex = re.compile(r'\w', flags=re.UNICODE)

    def __contains__(self, c):
        # Truthy when c is an explicit wordchar or matches \w; callers use
        # the `in` operator, which coerces the match object to bool.
        return c in self.wordchars or self.uw_regex.match(c)
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False, punctuation_chars=False):
if isinstance(instream, basestring):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if not punctuation_chars:
punctuation_chars = ''
elif punctuation_chars is True:
punctuation_chars = '();<>|&'
self.punctuation_chars = punctuation_chars
if punctuation_chars:
# _pushback_chars is a push back queue used by lookahead logic
self._pushback_chars = deque()
# these chars added because allowed in file names, args, wildcards
self.wordchars += '~-./*?=:@'
#remove any punctuation chars from wordchars
self.wordchars = ''.join(c for c in self.wordchars if c not in
self.punctuation_chars)
for c in punctuation_chars:
if c in self.wordchars:
self.wordchars.remove(c)
if self.posix:
self.wordchars = UnicodeWordchars(self.wordchars)
self.first_colon_pos = None
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, basestring):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
if self.punctuation_chars and self._pushback_chars:
nextchar = self._pushback_chars.pop()
else:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno += 1
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno += 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.punctuation_chars:
self.token = nextchar
self.state = 'c'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token += nextchar
self.state = ' '
break
else:
self.state = 'a'
elif (self.posix and nextchar in self.escape and self.state
in self.escapedquotes):
escapedstate = self.state
self.state = nextchar
else:
self.token += nextchar
elif self.state in self.escape:
if not nextchar: # end of file
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if (escapedstate in self.quotes and
nextchar != self.state and nextchar != escapedstate):
self.token += self.state
self.token += nextchar
self.state = escapedstate
elif self.state in ('a', 'c'):
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno += 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif self.state == 'c':
if nextchar in self.punctuation_chars:
self.token += nextchar
else:
if nextchar not in self.whitespace:
self._pushback_chars.append(nextchar)
self.state = ' '
break
elif (nextchar in self.wordchars or nextchar in self.quotes
or self.whitespace_split):
self.token += nextchar
if nextchar == ':':
self.first_colon_pos = len(self.token)-1
else:
if self.punctuation_chars:
self._pushback_chars.append(nextchar)
else:
self.pushback.appendleft(nextchar)
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
    def __iter__(self):
        # The lexer is its own iterator; tokens come from next()/get_token().
        return self
    def next(self):
        """Return the next token, raising StopIteration at end of input.

        Python 2 iterator protocol method (Python 3 would spell this
        ``__next__``) — this module is written for Python 2 (it also uses
        ``basestring`` elsewhere).
        """
        token = self.get_token()
        # get_token() signals exhaustion by returning the eof sentinel.
        if token == self.eof:
            raise StopIteration
        return token
def split(s, comments=False, posix=True, punctuation_chars=False):
    """Split the string *s* using shell-like syntax and return the tokens."""
    lexer = shlex(s, posix=posix, punctuation_chars=punctuation_chars)
    # Split purely on whitespace instead of consulting wordchars.
    lexer.whitespace_split = True
    # Unless comment handling is requested, treat commenters as plain text.
    if not comments:
        lexer.commenters = ''
    return list(lexer)
| mit |
iulian787/spack | lib/spack/external/_pytest/python_api.py | 18 | 23422 | import math
import sys
import py
from _pytest.compat import isclass, izip
from _pytest.outcomes import fail
import _pytest._code
def _cmp_raises_type_error(self, other):
"""__cmp__ implementation which raises TypeError. Used
by Approx base classes to implement only == and != and raise a
TypeError for other comparisons.
Needed in Python 2 only, Python 3 all it takes is not implementing the
other operators at all.
"""
__tracebackhide__ = True
raise TypeError('Comparison operators other than == and != not supported by approx objects')
# builtin pytest.approx helper
class ApproxBase(object):
    """
    Provide shared utilities for making approximate comparisons between numbers
    or sequences of numbers.
    """

    def __init__(self, expected, rel=None, abs=None, nan_ok=False):
        self.expected = expected
        self.abs = abs
        self.rel = rel
        self.nan_ok = nan_ok

    def __repr__(self):
        raise NotImplementedError

    def __eq__(self, actual):
        # Every (actual, expected) pair must fall within tolerance.
        for got, wanted in self._yield_comparisons(actual):
            if not (got == self._approx_scalar(wanted)):
                return False
        return True

    # Approx objects carry tolerances and are not usable as dict keys.
    __hash__ = None

    def __ne__(self, actual):
        return not (actual == self)

    if sys.version_info[0] == 2:
        # On Python 2, forbid ordering comparisons explicitly.
        __cmp__ = _cmp_raises_type_error

    def _approx_scalar(self, x):
        # Wrap one expected value with this object's tolerance settings.
        return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)

    def _yield_comparisons(self, actual):
        """
        Yield all the pairs of numbers to be compared. This is used to
        implement the `__eq__` method.
        """
        raise NotImplementedError
class ApproxNumpy(ApproxBase):
    """
    Perform approximate comparisons for numpy arrays.
    """

    # Tell numpy to use our `__eq__` operator instead of its.
    __array_priority__ = 100

    def __repr__(self):
        # It might be nice to rewrite this function to account for the
        # shape of the array...
        return "approx({0!r})".format(
            [self._approx_scalar(x) for x in self.expected])

    if sys.version_info[0] == 2:
        __cmp__ = _cmp_raises_type_error

    def __eq__(self, actual):
        import numpy as np

        # Anything numpy can coerce into an array is comparable; everything
        # else is rejected with a TypeError.
        try:
            actual = np.asarray(actual)
        except:  # noqa
            raise TypeError("cannot compare '{0}' to numpy.ndarray".format(actual))
        # Differently-shaped arrays are never approximately equal.
        if actual.shape != self.expected.shape:
            return False
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        import numpy as np

        # `actual` is guaranteed to be an ndarray here: `__eq__` casts it
        # before delegating to `ApproxBase.__eq__`, the only caller.
        for idx in np.ndindex(self.expected.shape):
            yield actual[idx], self.expected[idx]
class ApproxMapping(ApproxBase):
    """
    Perform approximate comparisons for mappings where the values are numbers
    (the keys can be anything).
    """

    def __repr__(self):
        wrapped = dict((k, self._approx_scalar(v))
                       for k, v in self.expected.items())
        return "approx({0!r})".format(wrapped)

    def __eq__(self, actual):
        # The key sets must match exactly; only the values are approximate.
        if set(actual.keys()) != set(self.expected.keys()):
            return False
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        for key in self.expected.keys():
            yield actual[key], self.expected[key]
class ApproxSequence(ApproxBase):
    """
    Perform approximate comparisons for sequences of numbers.
    """

    # Tell numpy to use our `__eq__` operator instead of its.
    __array_priority__ = 100

    def __repr__(self):
        # Preserve tuple/list/set in the repr; any other sequence type
        # falls back to a list.
        container = type(self.expected)
        if container not in (tuple, list, set):
            container = list
        return "approx({0!r})".format(container(
            self._approx_scalar(x) for x in self.expected))

    def __eq__(self, actual):
        # Sequences of different lengths are never approximately equal.
        if len(actual) != len(self.expected):
            return False
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        return izip(actual, self.expected)
class ApproxScalar(ApproxBase):
    """
    Perform approximate comparisons for single numbers only.
    """

    def __repr__(self):
        """
        Return a string communicating both the expected value and the tolerance
        for the comparison being made, e.g. '1.0 +- 1e-6'.  Use the unicode
        plus/minus symbol if this is python3 (it's too hard to get right for
        python2).
        """
        if isinstance(self.expected, complex):
            return str(self.expected)

        # Infinities aren't compared using tolerances, so don't show a
        # tolerance.
        if math.isinf(self.expected):
            return str(self.expected)

        # If a sensible tolerance can't be calculated, self.tolerance will
        # raise a ValueError.  In this case, display '???'.
        try:
            vetted_tolerance = '{:.1e}'.format(self.tolerance)
        except ValueError:
            vetted_tolerance = '???'

        if sys.version_info[0] == 2:
            return '{0} +- {1}'.format(self.expected, vetted_tolerance)
        else:
            return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)

    def __eq__(self, actual):
        """
        Return true if the given value is equal to the expected value within
        the pre-specified tolerance.
        """
        # Short-circuit exact equality.
        if actual == self.expected:
            return True

        # Allow the user to control whether NaNs are considered equal to each
        # other or not.  The abs() calls are for compatibility with complex
        # numbers.
        if math.isnan(abs(self.expected)):
            return self.nan_ok and math.isnan(abs(actual))

        # Infinity shouldn't be approximately equal to anything but itself, but
        # if there's a relative tolerance, it will be infinite and infinity
        # will seem approximately equal to everything.  The equal-to-itself
        # case would have been short circuited above, so here we can just
        # return false if the expected value is infinite.  The abs() call is
        # for compatibility with complex numbers.
        if math.isinf(abs(self.expected)):
            return False

        # Return true if the two numbers are within the tolerance.
        return abs(self.expected - actual) <= self.tolerance

    __hash__ = None

    @property
    def tolerance(self):
        """
        Return the tolerance for the comparison.  This could be either an
        absolute tolerance or a relative tolerance, depending on what the user
        specified or which would be larger.

        Raises ValueError if either tolerance is negative or NaN.
        """
        def set_default(x, default):
            return x if x is not None else default

        # Figure out what the absolute tolerance should be.  ``self.abs`` is
        # either None or a value specified by the user.
        absolute_tolerance = set_default(self.abs, 1e-12)

        if absolute_tolerance < 0:
            raise ValueError("absolute tolerance can't be negative: {0}".format(absolute_tolerance))
        if math.isnan(absolute_tolerance):
            raise ValueError("absolute tolerance can't be NaN.")

        # If the user specified an absolute tolerance but not a relative one,
        # just return the absolute tolerance.
        if self.rel is None:
            if self.abs is not None:
                return absolute_tolerance

        # Figure out what the relative tolerance should be.  ``self.rel`` is
        # either None or a value specified by the user.  This is done after
        # we've made sure the user didn't ask for an absolute tolerance only,
        # because we don't want to raise errors about the relative tolerance if
        # we aren't even going to use it.
        relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)

        if relative_tolerance < 0:
            # Bug fix: this message previously reported the *absolute*
            # tolerance, which made the error misleading.
            raise ValueError("relative tolerance can't be negative: {0}".format(relative_tolerance))
        if math.isnan(relative_tolerance):
            raise ValueError("relative tolerance can't be NaN.")

        # Return the larger of the relative and absolute tolerances.
        return max(relative_tolerance, absolute_tolerance)
def approx(expected, rel=None, abs=None, nan_ok=False):
    """
    Assert that two numbers (or two sets of numbers) are equal to each other
    within some tolerance.

    Due to the `intricacies of floating-point arithmetic`__, numbers that we
    would intuitively expect to be equal are not always so::

        >>> 0.1 + 0.2 == 0.3
        False

    __ https://docs.python.org/3/tutorial/floatingpoint.html

    This problem is commonly encountered when writing tests, e.g. when making
    sure that floating-point values are what you expect them to be.  One way to
    deal with this problem is to assert that two floating-point numbers are
    equal to within some appropriate tolerance::

        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
        True

    However, comparisons like this are tedious to write and difficult to
    understand.  Furthermore, absolute comparisons like the one above are
    usually discouraged because there's no tolerance that works well for all
    situations.  ``1e-6`` is good for numbers around ``1``, but too small for
    very big numbers and too big for very small ones.  It's better to express
    the tolerance as a fraction of the expected value, but relative comparisons
    like that are even more difficult to write correctly and concisely.

    The ``approx`` class performs floating-point comparisons using a syntax
    that's as intuitive as possible::

        >>> from pytest import approx
        >>> 0.1 + 0.2 == approx(0.3)
        True

    The same syntax also works for sequences of numbers::

        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
        True

    Dictionary *values*::

        >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
        True

    And ``numpy`` arrays::

        >>> import numpy as np  # doctest: +SKIP
        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6]))  # doctest: +SKIP
        True

    By default, ``approx`` considers numbers within a relative tolerance of
    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
    This treatment would lead to surprising results if the expected value was
    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
    To handle this case less surprisingly, ``approx`` also considers numbers
    within an absolute tolerance of ``1e-12`` of its expected value to be
    equal.  Infinity and NaN are special cases.  Infinity is only considered
    equal to itself, regardless of the relative tolerance.  NaN is not
    considered equal to anything by default, but you can make it be equal to
    itself by setting the ``nan_ok`` argument to True.  (This is meant to
    facilitate comparing arrays that use NaN to mean "no data".)

    Both the relative and absolute tolerances can be changed by passing
    arguments to the ``approx`` constructor::

        >>> 1.0001 == approx(1)
        False
        >>> 1.0001 == approx(1, rel=1e-3)
        True
        >>> 1.0001 == approx(1, abs=1e-3)
        True

    If you specify ``abs`` but not ``rel``, the comparison will not consider
    the relative tolerance at all.  In other words, two numbers that are within
    the default relative tolerance of ``1e-6`` will still be considered unequal
    if they exceed the specified absolute tolerance.  If you specify both
    ``abs`` and ``rel``, the numbers will be considered equal if either
    tolerance is met::

        >>> 1 + 1e-8 == approx(1)
        True
        >>> 1 + 1e-8 == approx(1, abs=1e-12)
        False
        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
        True

    If you're thinking about using ``approx``, then you might want to know how
    it compares to other good ways of comparing floating-point numbers.  All of
    these algorithms are based on relative and absolute tolerances and should
    agree for the most part, but they do have meaningful differences:

    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``:  True if the relative
      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
      tolerance is met.  Because the relative tolerance is calculated w.r.t.
      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
      ``b`` is a "reference value").  You have to specify an absolute tolerance
      if you want to compare to ``0.0`` because there is no tolerance by
      default.  Only available in python>=3.5.  `More information...`__

      __ https://docs.python.org/3/library/math.html#math.isclose

    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
      between ``a`` and ``b`` is less that the sum of the relative tolerance
      w.r.t. ``b`` and the absolute tolerance.  Because the relative tolerance
      is only calculated w.r.t. ``b``, this test is asymmetric and you can
      think of ``b`` as the reference value.  Support for comparing sequences
      is provided by ``numpy.allclose``.  `More information...`__

      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html

    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
      are within an absolute tolerance of ``1e-7``.  No relative tolerance is
      considered and the absolute tolerance cannot be changed, so this function
      is not appropriate for very large or very small numbers.  Also, it's only
      available in subclasses of ``unittest.TestCase`` and it's ugly because it
      doesn't follow PEP8.  `More information...`__

      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual

    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
      Because the relative tolerance is only calculated w.r.t. ``b``, this test
      is asymmetric and you can think of ``b`` as the reference value.  In the
      special case that you explicitly specify an absolute tolerance but not a
      relative tolerance, only the absolute tolerance is considered.

    .. warning::

       .. versionchanged:: 3.2

       In order to avoid inconsistent behavior, ``TypeError`` is
       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
       The example below illustrates the problem::

           assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
           assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)

       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
       to be called.  But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to
       comparison.  This is because the call hierarchy of rich comparisons
       follows a fixed behavior.  `More information...`__

       __ https://docs.python.org/3/reference/datamodel.html#object.__ge__
    """

    # ``Mapping`` and ``Sequence`` moved to ``collections.abc`` in Python 3.3
    # and the old ``collections`` aliases were removed in Python 3.10, so
    # prefer the new location and fall back for Python 2.
    try:
        from collections.abc import Mapping, Sequence
    except ImportError:  # Python 2
        from collections import Mapping, Sequence
    from _pytest.compat import STRING_TYPES as String

    # Delegate the comparison to a class that knows how to deal with the type
    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
    #
    # This architecture is really driven by the need to support numpy arrays.
    # The only way to override `==` for arrays without requiring that approx be
    # the left operand is to inherit the approx object from `numpy.ndarray`.
    # But that can't be a general solution, because it requires (1) numpy to be
    # installed and (2) the expected value to be a numpy array.  So the general
    # solution is to delegate each type of expected value to a different class.
    #
    # This has the advantage that it made it easy to support mapping types
    # (i.e. dict).  The old code accepted mapping types, but would only compare
    # their keys, which is probably not what most people would expect.

    if _is_numpy_array(expected):
        cls = ApproxNumpy
    elif isinstance(expected, Mapping):
        cls = ApproxMapping
    elif isinstance(expected, Sequence) and not isinstance(expected, String):
        cls = ApproxSequence
    else:
        cls = ApproxScalar

    return cls(expected, rel, abs, nan_ok)
def _is_numpy_array(obj):
"""
Return true if the given object is a numpy array. Make a special effort to
avoid importing numpy unless it's really necessary.
"""
import inspect
for cls in inspect.getmro(type(obj)):
if cls.__module__ == 'numpy':
try:
import numpy as np
return isinstance(obj, np.ndarray)
except ImportError:
pass
return False
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
    """
    Assert that a code block/function call raises ``expected_exception``
    and raise a failure exception otherwise.

    This helper produces a ``ExceptionInfo()`` object (see below).

    If using Python 2.5 or above, you may use this function as a
    context manager::

        >>> with raises(ZeroDivisionError):
        ...    1/0

    .. versionchanged:: 2.10

    In the context manager form you may use the keyword argument
    ``message`` to specify a custom failure message::

        >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
        ...    pass
        Traceback (most recent call last):
          ...
        Failed: Expecting ZeroDivisionError

    .. note::

       When using ``pytest.raises`` as a context manager, it's worthwhile to
       note that normal context manager rules apply and that the exception
       raised *must* be the final line in the scope of the context manager.
       Lines of code after that, within the scope of the context manager will
       not be executed. For example::

           >>> value = 15
           >>> with raises(ValueError) as exc_info:
           ...     if value > 10:
           ...         raise ValueError("value must be <= 10")
           ...     assert exc_info.type == ValueError  # this will not execute

       Instead, the following approach must be taken (note the difference in
       scope)::

           >>> with raises(ValueError) as exc_info:
           ...     if value > 10:
           ...         raise ValueError("value must be <= 10")
           ...
           >>> assert exc_info.type == ValueError

    Since version ``3.1`` you can use the keyword argument ``match`` to assert that the
    exception matches a text or regex::

        >>> with raises(ValueError, match='must be 0 or None'):
        ...     raise ValueError("value must be 0 or None")

        >>> with raises(ValueError, match=r'must be \d+$'):
        ...     raise ValueError("value must be 42")

    **Legacy forms**

    The forms below are fully supported but are discouraged for new code because the
    context manager form is regarded as more readable and less error-prone.

    It is possible to specify a callable by passing a to-be-called lambda::

        >>> raises(ZeroDivisionError, lambda: 1/0)
        <ExceptionInfo ...>

    or you can specify an arbitrary callable with arguments::

        >>> def f(x): return 1/x
        ...
        >>> raises(ZeroDivisionError, f, 0)
        <ExceptionInfo ...>
        >>> raises(ZeroDivisionError, f, x=0)
        <ExceptionInfo ...>

    It is also possible to pass a string to be evaluated at runtime::

        >>> raises(ZeroDivisionError, "f(0)")
        <ExceptionInfo ...>

    The string will be evaluated using the same ``locals()`` and ``globals()``
    at the moment of the ``raises`` call.

    .. autoclass:: _pytest._code.ExceptionInfo
        :members:

    .. note::
        Similar to caught exception objects in Python, explicitly clearing
        local references to returned ``ExceptionInfo`` objects can
        help the Python interpreter speed up its garbage collection.

        Clearing those references breaks a reference cycle
        (``ExceptionInfo`` --> caught exception --> frame stack raising
        the exception --> current frame stack --> local variables -->
        ``ExceptionInfo``) which makes Python keep all objects referenced
        from that cycle (including all local variables in the current
        frame) alive until the next cyclic garbage collection run. See the
        official Python ``try`` statement documentation for more detailed
        information.
    """
    # Hide this helper's own frame from pytest tracebacks.
    __tracebackhide__ = True
    msg = ("exceptions must be old-style classes or"
           " derived from BaseException, not %s")
    # Validate the expected exception(s) up front: each must be a class.
    if isinstance(expected_exception, tuple):
        for exc in expected_exception:
            if not isclass(exc):
                raise TypeError(msg % type(exc))
    elif not isclass(expected_exception):
        raise TypeError(msg % type(expected_exception))

    message = "DID NOT RAISE {0}".format(expected_exception)
    match_expr = None

    # No positional args -> context-manager form; honour the optional
    # ``message`` and ``match`` keyword arguments.
    if not args:
        if "message" in kwargs:
            message = kwargs.pop("message")
        if "match" in kwargs:
            match_expr = kwargs.pop("match")
            message += " matching '{0}'".format(match_expr)
        return RaisesContext(expected_exception, message, match_expr)
    # Legacy string form: compile and exec the code in the caller's frame.
    elif isinstance(args[0], str):
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)
        # print "raises frame scope: %r" % frame.f_locals
        try:
            code = _pytest._code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn'T mean f_globals == f_locals something special?
            #     this is destroyed here ...
        except expected_exception:
            return _pytest._code.ExceptionInfo()
    # Legacy callable form: call func(*args[1:], **kwargs).
    else:
        func = args[0]
        try:
            func(*args[1:], **kwargs)
        except expected_exception:
            return _pytest._code.ExceptionInfo()
    # Nothing raised (or the wrong thing raised and propagated): fail.
    fail(message)

# Allow ``except raises.Exception`` to catch the failure raised above.
raises.Exception = fail.Exception
class RaisesContext(object):
    """Context manager returned by ``pytest.raises`` (context-manager form).

    Captures the exception raised inside the ``with`` block into an
    ``ExceptionInfo`` object and fails the test if nothing was raised.
    """

    def __init__(self, expected_exception, message, match_expr):
        # expected_exception: class or tuple of classes to catch.
        self.expected_exception = expected_exception
        # message: failure text used when no exception is raised.
        self.message = message
        # match_expr: optional text/regex the exception repr must match.
        self.match_expr = match_expr
        self.excinfo = None

    def __enter__(self):
        # Allocate an empty ExceptionInfo now (without running its
        # __init__); it is filled in by __exit__ once an exception exists.
        self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
        return self.excinfo

    def __exit__(self, *tp):
        __tracebackhide__ = True
        # No exception was raised inside the block -> the test fails.
        if tp[0] is None:
            fail(self.message)
        if sys.version_info < (2, 7):
            # py26: on __exit__() exc_value often does not contain the
            # exception value.
            # http://bugs.python.org/issue7853
            if not isinstance(tp[1], BaseException):
                exc_type, value, traceback = tp
                tp = exc_type, exc_type(value), traceback
        # Populate the ExceptionInfo handed out by __enter__.
        self.excinfo.__init__(tp)
        suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
        if sys.version_info[0] == 2 and suppress_exception:
            sys.exc_clear()
        if self.match_expr:
            self.excinfo.match(self.match_expr)
        # Returning True suppresses the (expected) exception.
        return suppress_exception
| lgpl-2.1 |
gfonk/ansible | test/units/playbook/test_playbook.py | 290 | 2230 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
    """Unit tests for loading ``Playbook`` objects from (fake) YAML files."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_empty_playbook(self):
        # Constructing a Playbook with an empty loader must not raise.
        fake_loader = DictDataLoader({})
        p = Playbook(loader=fake_loader)

    def test_basic_playbook(self):
        # A minimal one-play playbook should load and expose its plays.
        fake_loader = DictDataLoader({
            "test_file.yml":"""
            - hosts: all
            """,
        })
        p = Playbook.load("test_file.yml", loader=fake_loader)
        plays = p.get_plays()

    def test_bad_playbook_files(self):
        # Malformed playbooks must raise AnsibleParserError on load.
        fake_loader = DictDataLoader({
            # represents a playbook which is not a list of plays
            "bad_list.yml": """
            foo: bar
            """,
            # represents a playbook where a play entry is mis-formatted
            "bad_entry.yml": """
            -
              - "This should be a mapping..."
            """,
        })
        vm = VariableManager()
        self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
        self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
| gpl-3.0 |
MQQiang/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Detect a single-byte charset by scoring character-pair frequencies.

    ``model`` supplies a char->order map and a precedence (bigram) matrix;
    confidence grows as observed pairs match the model's expectations.
    """

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        # char order of last character (255 = no previous sampled char)
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Defer to the name prober (e.g. for Hebrew variants) when present.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        # Optionally drop English letters, which would skew the statistics
        # for models that don't include them.
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            # Orders below SYMBOL_CAT_ORDER are "real" characters (not
            # symbols/control chars) and count toward the total.
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Both this char and the previous one are in the sampled
                # range -> score the bigram against the precedence matrix.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            # Once enough bigrams have been seen, take an early decision if
            # the confidence is clearly high or clearly low.
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        # Baseline confidence when no sequences have been observed yet.
        r = 0.01
        if self._mTotalSeqs > 0:
            # Ratio of "positive" bigrams, normalized by the model's typical
            # positive ratio, then scaled by the fraction of sampled chars.
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
            # Cap below 1.0 so this prober never claims certainty.
            if r >= 1.0:
                r = 0.99
        return r
| lgpl-3.0 |
Dav3xor/mpc | stats.py | 1 | 4105 | from util import *
from codes import *
import operator
def dostats(households):
    """Compute all summary statistics and write them to ``stats.txt``.

    Spins a progress meter between sections; relies on the sibling
    statistic functions and the ``reverse_states`` code table.
    """
    meter = spinner("Writing stats.txt", 1)
    with open('stats.txt', 'w') as stats:
        meter.spin()
        males, females = male_to_female(households)
        stats.write("Percentage Male: %.1f\n" % males)
        stats.write("Percentage Female: %.1f\n\n" % females)

        meter.spin()
        household_sizes = household_size(households)
        stats.write("Average Household Size (by state):\n")
        for key in household_sizes:
            stats.write("%s: %.1f\n" % (reverse_states[key], household_sizes[key]))
        stats.write("\n")

        meter.spin()
        farms = farm_households(households)
        stats.write("Percentage of Households That Are Farms (by state):\n")
        for key in farms:
            stats.write("%s: %.1f\n" % (reverse_states[key], farms[key]))
        stats.write("\n")

        meter.spin()
        male_names, female_names = common_names(households)
        stats.write("Most Common First Names:\n")
        stats.write(" Male:\n")
        for name in male_names:
            stats.write(" %s - %d\n" % (name[0], name[1]))
        # Bug fix: this section header previously said " Male:" again,
        # mislabeling the female name list in the report.
        stats.write(" Female:\n")
        for name in female_names:
            stats.write(" %s - %d\n" % (name[0], name[1]))
        stats.write("\n")

        meter.spin()
        areas = metropolitan_areas(households)
        stats.write("Metropolitan Areas (estimated population):\n")
        for key in areas:
            area = areas[key]
            stats.write("%-30s:%d\n" % (area['name'], area['population']))
        stats.write("\n")
        meter.spin()
    meter.done()
def male_to_female(households):
    """Return (percent male, percent female) across all household members.

    Persons whose ``SEX`` code is neither "1" (male) nor "2" (female) are
    excluded from the total.  Returns (0.0, 0.0) for empty input instead
    of raising ZeroDivisionError (bug fix).
    """
    males = 0
    females = 0
    total = 0
    for household in households:
        for person in household.people:
            sex = person['SEX']
            if sex == "1":
                males += 1
                total += 1
            elif sex == "2":
                females += 1
                total += 1
    # Guard against empty input / no classified persons.
    if total == 0:
        return (0.0, 0.0)
    return (float(males) / float(total) * 100.0,
            float(females) / float(total) * 100.0)
def household_size(households):
    """Return the average household size (people per household) by state.

    Keys are state FIPS codes as ints; values are float averages of the
    ``NUMPREC`` field.  Uses defaultdict grouping instead of manual
    key-presence checks (idiom; behavior unchanged).
    """
    from collections import defaultdict

    sizes_by_state = defaultdict(list)
    for household in households:
        state = int(household['STATEFIP'])
        sizes_by_state[state].append(int(household['NUMPREC']))
    return dict((state, float(sum(sizes)) / float(len(sizes)))
                for state, sizes in sizes_by_state.items())
def farm_households(households):
    """Return the percentage of households that are farms, keyed by state.

    A household counts as a farm when its ``FARM`` code equals 2.
    """
    # state -> [total households, farm households]
    counts = {}
    for household in households:
        state = int(household['STATEFIP'])
        if state not in counts:
            counts[state] = [0, 0]
        counts[state][0] += 1
        if int(household['FARM']) == 2:
            counts[state][1] += 1
    # Convert the tallies to percentages in place.
    for state in counts:
        total, farms = counts[state]
        counts[state] = float(farms) / float(total) * 100.0
    return counts
def metropolitan_areas(households):
    """Estimate populations of selected metropolitan areas.

    Sums ``num_people()`` over households inside each tracked METAREA,
    then scales by 100 (the data is a 1% sample).
    """
    areas = {7040: {'name': 'St. Louis', 'population': 0},
             1120: {'name': 'Boston', 'population': 0},
             6160: {'name': 'Philadelphia', 'population': 0}}
    for household in households:
        area_code = int(household['METAREA'])
        # METRO == 1 means the household is NOT actually in the metro area.
        if int(household['METRO']) != 1 and area_code in areas:
            areas[area_code]['population'] += household.num_people()
    # Expand from the 1% sample to a full-population estimate.
    for area_code in areas:
        areas[area_code]['population'] *= 100
    return areas
def common_names(households):
    """Return the five most common male and female first names.

    Returns a (male, female) pair of lists of ``(name, count)`` tuples in
    descending count order.  Bug fix: the original used the Python-2-only
    ``dict.iteritems()``, which raises AttributeError under Python 3;
    ``collections.Counter.most_common`` works on both and replaces the
    manual sort-then-reverse.
    """
    from collections import Counter

    male_counts = Counter()
    female_counts = Counter()
    for household in households:
        for person in household.people:
            name = person['NAMEFRST'].strip()
            if not name:
                continue
            name = name.split()[0]  # some names include a middle initial
            if person['SEX'] == "1":
                male_counts[name] += 1
            else:
                female_counts[name] += 1
    return male_counts.most_common(5), female_counts.most_common(5)
| mit |
chen0510566/MissionPlanner | Lib/site-packages/numpy/testing/tests/test_utils.py | 53 | 15034 | import warnings
import sys
import numpy as np
from numpy.testing import *
import unittest
class _GenericTest(object):
def _test_equal(self, a, b):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
try:
self._assert_func(a, b)
passed = True
except AssertionError:
pass
else:
raise AssertionError("a and b are found equal but are not")
def test_array_rank1_eq(self):
"""Test two equal array of rank 1 are found equal."""
a = np.array([1, 2])
b = np.array([1, 2])
self._test_equal(a, b)
def test_array_rank1_noteq(self):
"""Test two different array of rank 1 are found not equal."""
a = np.array([1, 2])
b = np.array([2, 2])
self._test_not_equal(a, b)
def test_array_rank2_eq(self):
"""Test two equal array of rank 2 are found equal."""
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 2], [3, 4]])
self._test_equal(a, b)
def test_array_diffshape(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array([1, 2])
b = np.array([[1, 2], [1, 2]])
self._test_not_equal(a, b)
def test_objarray(self):
"""Test object arrays."""
a = np.array([1, 1], dtype=np.object)
self._test_equal(a, 1)
class TestArrayEqual(_GenericTest, unittest.TestCase):
    """Tests for ``assert_array_equal`` across dtypes and shapes."""

    def setUp(self):
        self._assert_func = assert_array_equal

    def test_generic_rank1(self):
        """Test rank 1 array for all dtypes."""
        def foo(t):
            a = np.empty(2, t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_generic_rank3(self):
        """Test rank 3 array for all dtypes."""
        def foo(t):
            a = np.empty((4, 2, 3), t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_nan_array(self):
        """Test arrays with nan values in them."""
        a = np.array([1, 2, np.nan])
        b = np.array([1, 2, np.nan])
        self._test_equal(a, b)

        c = np.array([1, 2, 3])
        self._test_not_equal(c, b)

    def test_string_arrays(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array(['floupi', 'floupa'])
        b = np.array(['floupi', 'floupa'])
        self._test_equal(a, b)

        c = np.array(['floupipi', 'floupa'])
        self._test_not_equal(c, b)

    def test_recarrays(self):
        """Test record arrays."""
        # ``np.float`` was just an alias of the builtin float and was
        # removed in numpy 1.20+; use ``float`` directly (same dtype).
        a = np.empty(2, [('floupi', float), ('floupa', float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()
        self._test_equal(a, b)

        c = np.empty(2, [('floupipi', float), ('floupa', float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()
        self._test_not_equal(c, b)
class TestEqual(TestArrayEqual):
    """Re-run the array-equality suite with the scalar-aware assert_equal,
    plus extra scalar/complex cases."""

    def setUp(self):
        self._assert_func = assert_equal

    def test_nan_items(self):
        # nan equals nan (bare and in a sequence), but not a wrapped nan
        # or a number.
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        for other in ([np.nan], 1):
            self._test_not_equal(np.nan, other)

    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])

    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')

    def test_complex_item(self):
        matching = [(complex(1, 2), complex(1, 2)),
                    (complex(1, np.nan), complex(1, np.nan))]
        differing = [(complex(1, np.nan), complex(1, 2)),
                     (complex(np.nan, 1), complex(1, np.nan)),
                     (complex(np.nan, np.inf), complex(np.nan, 2))]
        for lhs, rhs in matching:
            self._assert_func(lhs, rhs)
        for lhs, rhs in differing:
            self._test_not_equal(lhs, rhs)

    def test_negative_zero(self):
        # assert_equal distinguishes +0.0 from -0.0.
        self._test_not_equal(np.PZERO, np.NZERO)

    def test_complex(self):
        vec = np.array([complex(1, 2), complex(1, np.nan)])
        same_shape_diff = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(vec, vec)
        self._test_not_equal(vec, same_shape_diff)
class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
    """Exercise assert_array_almost_equal's decimal precision and nan rules."""

    def setUp(self):
        self._assert_func = assert_array_almost_equal

    def test_simple(self):
        lhs = np.array([1234.2222])
        rhs = np.array([1234.2223])
        # Agreement holds up to 4 decimal places, but not 5.
        for places in (3, 4):
            self._assert_func(lhs, rhs, decimal=places)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(lhs, rhs, decimal=5))

    def test_nan(self):
        anan = np.array([np.nan])
        aone = np.array([1])
        ainf = np.array([np.inf])
        # nan matches only nan; any pairing with finite or infinite
        # values must fail.
        self._assert_func(anan, anan)
        for pair in ((anan, aone), (anan, ainf), (ainf, anan)):
            self.assertRaises(AssertionError,
                              lambda pair=pair: self._assert_func(*pair))
class TestAlmostEqual(_GenericTest, unittest.TestCase):
    """Tests for assert_almost_equal on scalar and array inputs."""

    def setUp(self):
        self._assert_func = assert_almost_equal

    def test_nan_item(self):
        # nan is almost-equal only to nan; pairing it with finite or
        # infinite scalars must fail.
        self._assert_func(np.nan, np.nan)
        self.assertRaises(AssertionError,
                          lambda : self._assert_func(np.nan, 1))
        self.assertRaises(AssertionError,
                          lambda : self._assert_func(np.nan, np.inf))
        self.assertRaises(AssertionError,
                          lambda : self._assert_func(np.inf, np.nan))

    def test_inf_item(self):
        # Same-signed infinities compare almost-equal.
        self._assert_func(np.inf, np.inf)
        self._assert_func(-np.inf, -np.inf)

    def test_simple_item(self):
        self._test_not_equal(1, 2)

    def test_complex_item(self):
        # Complex values compare component-wise; a nan component matches
        # only a nan in the same component.
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        z = np.array([complex(1, 2), complex(np.nan, 1)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
        self._test_not_equal(x, z)
class TestApproxEqual(unittest.TestCase):
    """Exercise assert_approx_equal's significant-digit comparison."""

    def setUp(self):
        self._assert_func = assert_approx_equal

    def test_simple_arrays(self):
        lhs = np.array([1234.22])
        rhs = np.array([1234.23])
        # The values agree to 6 significant digits, but not to 7.
        for digits in (5, 6):
            self._assert_func(lhs, rhs, significant=digits)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(lhs, rhs, significant=7))

    def test_simple_items(self):
        lhs = 1234.22
        rhs = 1234.23
        for digits in (4, 5, 6):
            self._assert_func(lhs, rhs, significant=digits)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(lhs, rhs, significant=7))

    def test_nan_array(self):
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        # nan only approximates nan; mixing it with finite or infinite
        # values must fail.
        self._assert_func(anan, anan)
        for pair in ((anan, aone), (anan, ainf), (ainf, anan)):
            self.assertRaises(AssertionError,
                              lambda pair=pair: self._assert_func(*pair))

    def test_nan_items(self):
        # NOTE(review): behaviorally identical to test_nan_array in the
        # original file; kept to preserve the test set.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        for pair in ((anan, aone), (anan, ainf), (ainf, anan)):
            self.assertRaises(AssertionError,
                              lambda pair=pair: self._assert_func(*pair))
class TestRaises(unittest.TestCase):
    """Tests for the numpy.testing ``raises`` decorator."""

    def setUp(self):
        class MyException(Exception):
            pass

        self.e = MyException

    def raises_exception(self, e):
        raise e

    def does_not_raise_exception(self):
        pass

    def test_correct_catch(self):
        # The decorated call raises the declared exception, so nothing
        # escapes.  (Unused ``f = `` binding removed — the result of the
        # call was never read.)
        raises(self.e)(self.raises_exception)(self.e)

    def test_wrong_exception(self):
        # An undeclared exception type must propagate out of the
        # decorated call.
        try:
            raises(self.e)(self.raises_exception)(RuntimeError)
        except RuntimeError:
            return
        else:
            raise AssertionError("should have caught RuntimeError")

    def test_catch_no_raise(self):
        # When nothing raises, the decorator itself must fail the test.
        try:
            raises(self.e)(self.does_not_raise_exception)()
        except AssertionError:
            return
        else:
            raise AssertionError("should have raised an AssertionError")
class TestWarns(unittest.TestCase):
    """Tests for assert_warns and its handling of the warnings state."""

    def test_warn(self):
        def f():
            warnings.warn("yo")

        before_filters = sys.modules['warnings'].filters[:]
        assert_warns(UserWarning, f)
        after_filters = sys.modules['warnings'].filters
        # Check that the warnings state is unchanged
        # (message typo fixed: "preserver" -> "preserve").
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")

    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            # Restore the filter state even if assert_warns left it
            # modified.
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
class TestAssertAllclose(unittest.TestCase):
    """Behavioral checks for assert_allclose's atol/rtol handling."""

    def test_simple(self):
        big = 1e-3
        small = 1e-9
        # A generous absolute tolerance accepts the pair; the default
        # tight tolerances reject it.
        assert_allclose(big, small, atol=1)
        self.assertRaises(AssertionError, assert_allclose, big, small)

        actual = np.array([big, small, big, small])
        desired = np.array([big, small, big, big])
        assert_allclose(actual, desired, atol=1)
        self.assertRaises(AssertionError, assert_allclose, actual, desired)

        # A relative deviation of 1e-8 passes the defaults but fails a
        # tighter rtol.
        desired[-1] = small * (1 + 1e-8)
        assert_allclose(actual, desired)
        self.assertRaises(AssertionError, assert_allclose, actual, desired,
                          rtol=1e-9)

        # rtol is scaled by the second (desired) argument, so the
        # comparison is asymmetric.
        assert_allclose(6, 10, rtol=0.5)
        self.assertRaises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
class TestArrayAlmostEqualNulp(unittest.TestCase):
    """Tests for assert_array_almost_equal_nulp (units in the last place)."""

    def test_simple(self):
        dev = np.random.randn(10)
        x = np.ones(10)
        y = x + dev * np.finfo(np.float64).eps
        # The bound must use the magnitude of the deviations: the original
        # ``2 * np.max(dev)`` is a *signed* maximum, which is too small (or
        # even negative) whenever the largest positive draw is smaller than
        # the largest negative one, making the test flaky.
        assert_array_almost_equal_nulp(x, y, nulp=2 * np.abs(dev).max())

    def test_simple2(self):
        x = np.random.randn(10)
        y = 2 * x

        def failure():
            return assert_array_almost_equal_nulp(x, y,
                                                  nulp=1000)
        self.assertRaises(AssertionError, failure)

    def test_big_float32(self):
        x = (1e10 * np.random.randn(10)).astype(np.float32)
        y = x + 1
        # At magnitude ~1e10 a float32 ulp is far bigger than 1, so the
        # perturbation is within 1000 ulp.
        assert_array_almost_equal_nulp(x, y, nulp=1000)

    def test_big_float64(self):
        x = 1e10 * np.random.randn(10)
        y = x + 1

        def failure():
            assert_array_almost_equal_nulp(x, y, nulp=1000)
        self.assertRaises(AssertionError, failure)

    def test_complex(self):
        x = np.random.randn(10) + 1j * np.random.randn(10)
        y = x + 1

        def failure():
            assert_array_almost_equal_nulp(x, y, nulp=1000)
        self.assertRaises(AssertionError, failure)

    def test_complex2(self):
        x = np.random.randn(10)
        # Builtin ``complex`` replaces the deprecated ``np.complex`` alias
        # (removed in NumPy 1.24); the resulting dtype is identical.
        y = np.array(x, complex) + 1e-16 * np.random.randn(10)
        assert_array_almost_equal_nulp(x, y, nulp=1000)
class TestULP(unittest.TestCase):
    """Tests for assert_array_max_ulp (max units-in-the-last-place distance)."""

    def test_equal(self):
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)

    def test_single(self):
        # Generate 1 + small deviation, check that adding eps gives a few ULP
        x = np.ones(10).astype(np.float32)
        x += 0.01 * np.random.randn(10).astype(np.float32)
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, x+eps, maxulp=20)

    def test_double(self):
        # Generate 1 + small deviation, check that adding eps gives a few ULP
        # Use float64 throughout: the original cast ``x`` to float32, so the
        # float64 eps perturbation was never exercised in double precision.
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x+eps, maxulp=200)

    def test_inf(self):
        # inf is adjacent (in ulp terms) to the largest finite value.
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)

    def test_nan(self):
        # Test that nan is 'far' from small, tiny, inf, max and min
        for dt in [np.float32, np.float64]:
            if dt == np.float32:
                maxulp = 1e6
            else:
                maxulp = 1e12
            inf = np.array([np.inf]).astype(dt)
            nan = np.array([np.nan]).astype(dt)
            big = np.array([np.finfo(dt).max])
            tiny = np.array([np.finfo(dt).tiny])
            # Literals 0.0/-0.0 replace np.PZERO/np.NZERO (aliases removed
            # in NumPy 2.0); the values are identical.
            zero = np.array([0.0]).astype(dt)
            nzero = np.array([-0.0]).astype(dt)
            for other in (inf, big, tiny, zero, nzero):
                self.assertRaises(
                    AssertionError,
                    lambda other=other: assert_array_max_ulp(nan, other,
                                                             maxulp=maxulp))
if __name__ == '__main__':
run_module_suite()
| gpl-3.0 |
shiblon/pytour | 3/tutorials/exercise_functions1.py | 1 | 1381 | # vim:tw=50
"""Exercise: Functions and If (1)
Now we have enough tools to do something more
interesting! Let's remind ourselves of how |if|
and **slicing** work.
For this and later exercises, you will fill in the
code marked |# TODO:| to make the tests pass.
Remember that you can use |[::-1]| to get a
reversed sequence using a slice.
First try running the code without changes. What
fails?
Note: we have included a special |_assert_equal|
function for testing. You would normally use the
|unittest| or |doctest| module for this, but it
is convenient in this tutorial to keep it simple.
Exercises
- Write the body for the function |reverse_a| by
replacing the |TODO| comment with real code. If
the string |s| starts with the letter |"a"|,
return it reversed. Otherwise return it
unchanged. You may want to use
|s.startswith('a')| instead of |s[0] == 'a'| so
that the function will also work on empty
strings.
"""
__doc__ = """Functions and branching exercise (1)
Make the assertion tests at the bottom of the file pass.
"""
def reverse_a(s):
    """Return s reversed if it starts with a, not reversed otherwise."""
    # The TODO body was unimplemented, so the function returned None and
    # the module's own assertions failed.  startswith() handles the empty
    # string safely, unlike ``s[0] == 'a'``.
    if s.startswith('a'):
        return s[::-1]
    return s
print("No news is good news: if nothing prints below, you passed!")
_assert_equal("gniht yllis a", reverse_a("a silly thing"))
_assert_equal("not so silly", reverse_a("not so silly"))
_assert_equal("", reverse_a(""))
| apache-2.0 |
ckc6cz/osf.io | framework/bcrypt/__init__.py | 62 | 1564 | # Adapted from:
# Name: Flask-Bcrypt
# Version: 0.3.2
# Summary: Bcrypt support for hashing passwords
# Home-page: https://github.com/maxcountryman/flask-bcrypt
# Author: Max Countryman
# Author-email: maxc@me.com
# License: BSD
import bcrypt
from website import settings
def generate_password_hash(password, rounds=None):
    '''Generates a password hash using `bcrypt`. Specifying `rounds` sets
    the log_rounds parameter of `bcrypt.gensalt()` which determines the
    complexity of the salt. 12 is the default value.

    Returns the hashed password.

    Raises ValueError if `password` is empty or None.
    '''
    if rounds is None:
        # Fall back to the site-wide work factor configured in settings.
        rounds = settings.BCRYPT_LOG_ROUNDS
    if not password:
        raise ValueError('Password must be non-empty.')
    # NOTE(review): ``unicode`` makes this Python 2-only code.
    pw_hash = bcrypt.hashpw(
        unicode(password).encode('utf-8'),
        bcrypt.gensalt(rounds)
    )
    return pw_hash
def constant_time_compare(val1, val2):
    '''Compare two strings in time independent of how many leading
    characters match.

    Returns True when the strings are equal, False otherwise.
    '''
    if len(val1) != len(val2):
        return False
    # XOR every character pair and OR the differences together; the loop
    # always walks the full string, so timing does not leak the position
    # of the first mismatch.
    diff = 0
    for ch1, ch2 in zip(val1, val2):
        diff |= ord(ch1) ^ ord(ch2)
    return diff == 0
def check_password_hash(pw_hash, password):
    '''Checks a hashed password against a password.

    Returns `True` if the password matched, `False` otherwise.
    '''
    # bcrypt.hashpw() re-uses the stored hash as the salt, so hashing the
    # candidate password reproduces the stored hash iff the password
    # matches; the comparison is constant-time to avoid timing leaks.
    return constant_time_compare(
        bcrypt.hashpw(
            unicode(password).encode('utf-8'),
            unicode(pw_hash).encode('utf-8')
        ),
        pw_hash
    )
| apache-2.0 |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-32-transformer/transformer/data_generators/generator_utils_test.py | 6 | 3771 | """Generator utilities test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import os
import tempfile
from builtins import bytes # pylint: disable=redefined-builtin
import tensorflow as tf
from data_generators import generator_utils
class GeneratorUtilsTest(tf.test.TestCase):
  """Tests for generator_utils file, download, gunzip and vocab helpers.

  NOTE(review): testMaybeDownload and testMaybeDownloadFromDrive reach out
  to google.com / drive.google.com and will fail without network access.
  """

  def testGenerateFiles(self):
    tmp_dir = self.get_temp_dir()
    (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir)
    tmp_file_name = os.path.basename(tmp_file_path)

    # Generate a trivial file and assert the file exists.
    def test_generator():
      yield {"inputs": [1], "target": [1]}
    filenames = generator_utils.train_data_filenames(tmp_file_name, tmp_dir, 1)
    generator_utils.generate_files(test_generator(), filenames)
    self.assertTrue(tf.gfile.Exists(tmp_file_path + "-train-00000-of-00001"))

    # Clean up.
    os.remove(tmp_file_path + "-train-00000-of-00001")
    os.remove(tmp_file_path)

  def testMaybeDownload(self):
    tmp_dir = self.get_temp_dir()
    (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir)
    tmp_file_name = os.path.basename(tmp_file_path)

    # Download Google index to the temporary file.http.
    res_path = generator_utils.maybe_download(tmp_dir, tmp_file_name + ".http",
                                              "http://google.com")
    self.assertEqual(res_path, tmp_file_path + ".http")

    # Clean up.
    os.remove(tmp_file_path + ".http")
    os.remove(tmp_file_path)

  def testMaybeDownloadFromDrive(self):
    tmp_dir = self.get_temp_dir()
    (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir)
    tmp_file_name = os.path.basename(tmp_file_path)

    # Download Google index to the temporary file.http.
    res_path = generator_utils.maybe_download_from_drive(
        tmp_dir, tmp_file_name + ".http", "http://drive.google.com")
    self.assertEqual(res_path, tmp_file_path + ".http")

    # Clean up.
    os.remove(tmp_file_path + ".http")
    os.remove(tmp_file_path)

  def testGunzipFile(self):
    tmp_dir = self.get_temp_dir()
    (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir)

    # Create a test zip file and unzip it.
    with gzip.open(tmp_file_path + ".gz", "wb") as gz_file:
      gz_file.write(bytes("test line", "utf-8"))
    generator_utils.gunzip_file(tmp_file_path + ".gz", tmp_file_path + ".txt")

    # Check that the unzipped result is as expected.
    lines = []
    for line in io.open(tmp_file_path + ".txt", "rb"):
      lines.append(line.decode("utf-8").strip())
    self.assertEqual(len(lines), 1)
    self.assertEqual(lines[0], "test line")

    # Clean up.
    os.remove(tmp_file_path + ".gz")
    os.remove(tmp_file_path + ".txt")
    os.remove(tmp_file_path)

  def testGetOrGenerateTxtVocab(self):
    data_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    test_file = os.path.join(self.get_temp_dir(), "test.txt")
    with tf.gfile.Open(test_file, "w") as outfile:
      outfile.write("a b c\n")
      outfile.write("d e f\n")

    # Create a vocab over the test file.
    vocab1 = generator_utils.get_or_generate_txt_vocab(
        data_dir, "test.voc", 20, test_file)
    self.assertTrue(tf.gfile.Exists(os.path.join(data_dir, "test.voc")))
    self.assertIsNotNone(vocab1)

    # Append a new line to the test file which would change the vocab if
    # the vocab were not being read from file.
    with tf.gfile.Open(test_file, "a") as outfile:
      outfile.write("g h i\n")
    vocab2 = generator_utils.get_or_generate_txt_vocab(
        data_dir, "test.voc", 20, test_file)
    self.assertTrue(tf.gfile.Exists(os.path.join(data_dir, "test.voc")))
    self.assertIsNotNone(vocab2)

    # Same dump => the second call read the cached vocab file instead of
    # regenerating it from the (now changed) text.
    self.assertEqual(vocab1.dump(), vocab2.dump())
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
stspyder/servo | tests/wpt/css-tests/tools/wptserve/tests/functional/test_response.py | 299 | 1931 | import os
import unittest
import urllib2
import json
import time
from types import MethodType
import wptserve
from base import TestUsingServer, doc_root
def send_body_as_header(self):
    """Replacement for ResponseWriter.end_headers (bound via MethodType in
    the tests below): instead of terminating the header block, it starts an
    ``X-Body`` header so any body the server writes afterwards becomes that
    header's value, observable from a HEAD response.
    """
    if self._response.add_required_headers:
        self.write_default_headers()
    self.write("X-Body: ")
    # Mark the headers complete so subsequent body writes land after
    # "X-Body: " on the header line.
    self._headers_complete = True
class TestResponse(TestUsingServer):
    """HEAD-request handling: the body must be suppressed unless
    ``send_body_for_head_request`` is set on the response.

    Both tests reroute body writes into an X-Body header (see
    send_body_as_header) so the would-be body is visible in the headers.
    """

    def test_head_without_body(self):
        @wptserve.handlers.handler
        def handler(request, response):
            response.writer.end_headers = MethodType(send_body_as_header,
                                                     response.writer,
                                                     wptserve.response.ResponseWriter)
            return [("X-Test", "TEST")], "body\r\n"

        route = ("GET", "/test/test_head_without_body", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], method="HEAD")
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual("6", resp.info()['Content-Length'])
        self.assertEqual("TEST", resp.info()['x-Test'])
        # No body was written for the plain HEAD request.
        self.assertEqual("", resp.info()['x-body'])

    def test_head_with_body(self):
        @wptserve.handlers.handler
        def handler(request, response):
            response.send_body_for_head_request = True
            response.writer.end_headers = MethodType(send_body_as_header,
                                                     response.writer,
                                                     wptserve.response.ResponseWriter)
            return [("X-Test", "TEST")], "body\r\n"

        route = ("GET", "/test/test_head_with_body", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], method="HEAD")
        self.assertEqual("6", resp.info()['Content-Length'])
        self.assertEqual("TEST", resp.info()['x-Test'])
        # With the flag set, the body text shows up in the header.
        self.assertEqual("body", resp.info()['X-Body'])
if __name__ == '__main__':
unittest.main()
| mpl-2.0 |
heyavery/lopenr | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Detects a single-byte charset by scoring character-pair (bigram)
    sequences against the language model supplied at construction time.

    ``feed`` classifies each consecutive pair of frequent characters via
    the model's precedenceMatrix; ``get_confidence`` turns the tallies
    into a 0.01..0.99 score.
    """

    def __init__(self, model, reversed=False, nameProber=None):
        # NOTE(review): ``reversed`` shadows the builtin of the same name;
        # kept as-is for API compatibility.
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        # char order of last character
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Prefer the auxiliary name prober's decision when one was supplied.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            # Map the raw byte to its frequency rank in this language model.
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                if self._mLastOrder < SAMPLE_SIZE:
                    # Both characters of the pair are frequent: look up the
                    # pair's likelihood category in the precedence matrix.
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order
        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                # Enough evidence gathered: short-circuit to a verdict when
                # the confidence is decisively high or low.
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe
        return self.get_state()

    def get_confidence(self):
        # Ratio of positive-category pairs, normalized by the model's
        # typical positive ratio, then scaled by the fraction of frequent
        # characters seen; clamped to [0.01, 0.99).
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
| mit |
BlindHunter/django | django/contrib/gis/gdal/field.py | 355 | 6739 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
    """
    This class wraps an OGR Field, and needs to be instantiated
    from a Feature object.
    """

    def __init__(self, feat, index):
        """
        Initializes on the feature object and the integer index of
        the field within the feature.
        """
        # Setting the feature pointer and index.
        self._feat = feat
        self._index = index

        # Getting the pointer for this field.
        fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
        if not fld_ptr:
            raise GDALException('Cannot create OGR Field, invalid pointer given.')
        self.ptr = fld_ptr

        # Setting the class depending upon the OGR Field Type (OFT)
        # -- the instance is specialized in place to the matching
        # Field subclass so ``value`` returns the right Python type.
        self.__class__ = OGRFieldTypes[self.type]

        # OFTReal with no precision should be an OFTInteger.
        if isinstance(self, OFTReal) and self.precision == 0:
            self.__class__ = OFTInteger
            self._double = True

    def __str__(self):
        "Returns the string representation of the Field."
        return str(self.value).strip()

    # #### Field Methods ####
    def as_double(self):
        "Retrieves the Field's value as a double (float)."
        return capi.get_field_as_double(self._feat.ptr, self._index)

    def as_int(self, is_64=False):
        "Retrieves the Field's value as an integer."
        if is_64:
            return capi.get_field_as_integer64(self._feat.ptr, self._index)
        else:
            return capi.get_field_as_integer(self._feat.ptr, self._index)

    def as_string(self):
        "Retrieves the Field's value as a string."
        string = capi.get_field_as_string(self._feat.ptr, self._index)
        return force_text(string, encoding=self._feat.encoding, strings_only=True)

    def as_datetime(self):
        "Returns the Field's value as a tuple of date & time components."
        # Components come back as raw ctypes c_int objects; callers (the
        # OFTDate/OFTTime/OFTDateTime subclasses) unwrap them via .value.
        yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
        status = capi.get_field_as_datetime(
            self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
            byref(hh), byref(mn), byref(ss), byref(tz))
        if status:
            return (yy, mm, dd, hh, mn, ss, tz)
        else:
            raise GDALException('Unable to retrieve date & time information from the field.')

    # #### Field Properties ####
    @property
    def name(self):
        "Returns the name of this Field."
        name = capi.get_field_name(self.ptr)
        return force_text(name, encoding=self._feat.encoding, strings_only=True)

    @property
    def precision(self):
        "Returns the precision of this Field."
        return capi.get_field_precision(self.ptr)

    @property
    def type(self):
        "Returns the OGR type of this Field."
        return capi.get_field_type(self.ptr)

    @property
    def type_name(self):
        "Return the OGR field type name for this Field."
        return capi.get_field_type_name(self.type)

    @property
    def value(self):
        "Returns the value of this Field."
        # Default is to get the field as a string; subclasses override.
        return self.as_string()

    @property
    def width(self):
        "Returns the width of this Field."
        return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
    # _double is flipped to True by Field.__init__ when the underlying
    # field is actually an OFTReal declared with precision 0.
    _double = False
    # _bit64 is overridden by the OFTInteger64 subclass.
    _bit64 = False

    @property
    def value(self):
        "Returns an integer contained in this field."
        if self._double:
            # If this is really from an OFTReal field with no precision,
            # read as a double and cast as Python int (to prevent overflow).
            return int(self.as_double())
        else:
            return self.as_int(self._bit64)

    @property
    def type(self):
        """
        GDAL uses OFTReals to represent OFTIntegers in created
        shapefiles -- forcing the type here since the underlying field
        type may actually be OFTReal.
        """
        return 0
class OFTReal(Field):
    # NOTE: an OFTReal with precision 0 is re-classed to OFTInteger in
    # Field.__init__, so instances reaching here have a real precision.
    @property
    def value(self):
        "Returns a float contained in this field."
        return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field):
pass
class OFTWideString(Field):
pass
class OFTBinary(Field):
pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
    @property
    def value(self):
        "Returns a Python `date` object for the OFTDate field."
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return date(yy.value, mm.value, dd.value)
        except (ValueError, GDALException):
            # Invalid or unset field values surface as None, not an error.
            return None
class OFTDateTime(Field):
    @property
    def value(self):
        "Returns a Python `datetime` object for this OFTDateTime field."
        # TODO: Adapt timezone information.
        #  See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
        #  The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
        #  100=GMT, 104=GMT+1, 80=GMT-5, etc.
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            # Invalid or unset field values surface as None, not an error.
            return None
class OFTTime(Field):
    @property
    def value(self):
        "Returns a Python `time` object for this OFTTime field."
        try:
            # Date components are fetched too but only the time is used.
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return time(hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            # Invalid or unset field values surface as None, not an error.
            return None
class OFTInteger64(OFTInteger):
    # Reads via get_field_as_integer64 (see OFTInteger.value / Field.as_int).
    _bit64 = True
# List fields are also just subclasses
class OFTIntegerList(Field):
pass
class OFTRealList(Field):
pass
class OFTStringList(Field):
pass
class OFTWideStringList(Field):
pass
class OFTInteger64List(Field):
pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = {
0: OFTInteger,
1: OFTIntegerList,
2: OFTReal,
3: OFTRealList,
4: OFTString,
5: OFTStringList,
6: OFTWideString,
7: OFTWideStringList,
8: OFTBinary,
9: OFTDate,
10: OFTTime,
11: OFTDateTime,
# New 64-bit integer types in GDAL 2
12: OFTInteger64,
13: OFTInteger64List,
}
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
| bsd-3-clause |
remotesyssupport/cobbler-1 | cobbler/modules/authn_passthru.py | 14 | 1058 | """
Authentication module that defers to Apache and trusts
what Apache trusts.
Copyright 2008-2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import distutils.sysconfig
import sys
import os
from cobbler import utils
from utils import _
import traceback
plib = distutils.sysconfig.get_python_lib()
mod_path="%s/cobbler" % plib
sys.path.insert(0, mod_path)
import cexceptions
import utils
def register():
    """Mandatory cobbler module registration hook.

    Identifies this module to cobbler as an authentication ("authn")
    provider.
    """
    return "authn"
def authenticate(api_handle, username, password):
    """
    Validate a username/password combo, returning True/False.

    In passthru mode Apache is trusted to have already authenticated the
    user, so `api_handle` and `username` are ignored; the supplied
    password must simply match cobbler's shared secret.
    """
    # Direct comparison replaces the verbose if/else that built the same
    # boolean by hand; the stale "Uses cobbler_auth_helper" docstring note
    # was dropped as nothing here calls such a helper.
    return password == utils.get_shared_secret()
| gpl-2.0 |
Wolfterro/SVD | src/old/1.0/MessageBoxClass.py | 3 | 1852 | # -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2017 Wolfgang Almeida <wolfgang.almeida@yahoo.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Imports gerais
# ==============
from __future__ import print_function
from PyQt4 import QtCore, QtGui
import os
import sys
# Imports do programa
# ===================
from GlobalVars import GlobalVars
# Classe da caixa de mensagens
# ============================
class ShowMessageBox(object):
    """Helper that displays a modal Qt message box."""

    # iconType/closeType are passed straight to QMessageBox.setIcon /
    # setStandardButtons (QMessageBox enum values).  A non-zero closeVal
    # terminates the whole application with that exit status once the
    # dialog is dismissed.
    def show(self, title, iconType, text, infoText, closeType, closeVal):
        msg = QtGui.QMessageBox()
        msg.setIcon(iconType)
        msg.setWindowIcon(QtGui.QIcon(GlobalVars.IconPath))
        msg.setText(text)
        msg.setInformativeText(infoText)
        msg.setWindowTitle(title)
        msg.setStandardButtons(closeType)
        msg.exec_()  # blocks until the user closes the dialog
        if closeVal != 0:
            sys.exit(closeVal)
wangxiangyu/horizon | openstack_dashboard/dashboards/admin/hypervisors/tests.py | 12 | 4709 | # Copyright 2013 B1 Systems GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class HypervisorViewTest(test.BaseAdminViewTests):
    """Tests for the admin hypervisors index view (mox record/replay)."""

    @test.create_stubs({api.nova: ('extension_supported',
                                   'hypervisor_list',
                                   'hypervisor_stats',
                                   'service_list')})
    def test_index(self):
        """Index view renders both tabs with expected data and row actions."""
        hypervisors = self.hypervisors.list()
        services = self.services.list()
        stats = self.hypervisors.stats
        # Record the nova API calls the view is expected to make.
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.hypervisor_list(IsA(http.HttpRequest)).AndReturn(hypervisors)
        api.nova.hypervisor_stats(IsA(http.HttpRequest)).AndReturn(stats)
        api.nova.service_list(IsA(http.HttpRequest)).AndReturn(services)
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:admin:hypervisors:index'))
        self.assertTemplateUsed(res, 'admin/hypervisors/index.html')
        hypervisors_tab = res.context['tab_group'].get_tab('hypervisor')
        self.assertItemsEqual(hypervisors_tab._tables['hypervisors'].data,
                              hypervisors)
        # Only nova-compute services should appear in the compute-host table.
        host_tab = res.context['tab_group'].get_tab('compute_host')
        host_table = host_tab._tables['compute_host']
        compute_services = [service for service in services
                            if service.binary == 'nova-compute']
        self.assertItemsEqual(host_table.data, compute_services)
        # Row actions depend on host/service state (fixture rows 0..2):
        # an up host exposes a single action, a down host also gets
        # "evacuate", and a disabled service gets "enable"/"migrate".
        actions_host_up = host_table.get_row_actions(host_table.data[0])
        self.assertEqual(1, len(actions_host_up))
        actions_host_down = host_table.get_row_actions(host_table.data[1])
        self.assertEqual(2, len(actions_host_down))
        self.assertEqual('evacuate', actions_host_down[0].name)
        actions_service_enabled = host_table.get_row_actions(
            host_table.data[1])
        self.assertEqual('evacuate', actions_service_enabled[0].name)
        self.assertEqual('disable', actions_service_enabled[1].name)
        actions_service_disabled = host_table.get_row_actions(
            host_table.data[2])
        self.assertEqual('enable', actions_service_disabled[0].name)
        self.assertEqual('migrate_maintenance',
                         actions_service_disabled[1].name)

    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats',
                                   'service_list')})
    def test_service_list_unavailable(self):
        """An error message is emitted when nova.service_list raises."""
        hypervisors = self.hypervisors.list()
        stats = self.hypervisors.stats
        api.nova.hypervisor_list(IsA(http.HttpRequest)).AndReturn(hypervisors)
        api.nova.hypervisor_stats(IsA(http.HttpRequest)).AndReturn(stats)
        api.nova.service_list(IsA(http.HttpRequest)).AndRaise(
            self.exceptions.nova)
        self.mox.ReplayAll()

        resp = self.client.get(reverse('horizon:admin:hypervisors:index'))
        self.assertMessageCount(resp, error=1, warning=0)
class HypervisorDetailViewTest(test.BaseAdminViewTests):
    """Tests for the hypervisor detail view."""

    @test.create_stubs({api.nova: ('hypervisor_search',)})
    def test_index(self):
        """Detail view lists the servers of the requested hypervisor."""
        hypervisor = self.hypervisors.first()
        # hypervisor_search may return several matches; the view must pick
        # the one identified by the "<id>_<hostname>" URL argument.
        api.nova.hypervisor_search(
            IsA(http.HttpRequest),
            hypervisor.hypervisor_hostname).AndReturn([
                hypervisor,
                self.hypervisors.list()[1]])
        self.mox.ReplayAll()

        url = reverse('horizon:admin:hypervisors:detail',
                      args=["%s_%s" % (hypervisor.id,
                                       hypervisor.hypervisor_hostname)])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'admin/hypervisors/detail.html')
        self.assertItemsEqual(res.context['table'].data, hypervisor.servers)
| apache-2.0 |
russomi/appengine-pipeline-read-only | demo/pipeline/simplejson/ordered_dict.py | 8 | 3391 | #!/usr/bin/python2.5
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    all
except NameError:
    def all(seq):
        """Fallback all(): True unless some element of *seq* is falsy."""
        for elem in seq:
            if not elem:
                break
        else:
            return True
        return False
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order (Python 2.4+ backport).

    Order is tracked with a circular doubly linked list of
    ``[key, prev, next]`` cells; ``self.__end`` is the sentinel node and
    ``self.__map`` maps each key to its cell for O(1) deletion.
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        # Initialize the linked-list bookkeeping only once, even if
        # __init__ is called again on an existing instance.
        try:
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        # Reset the sentinel so it points at itself in both directions.
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        # New keys are appended at the tail (just before the sentinel);
        # re-assigning an existing key keeps its original position.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the doubly linked list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backward.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO when last is True."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        """Pickle support: serialize as an item list plus extra attributes."""
        items = [[k, self[k]] for k in self]
        # Temporarily strip the internal bookkeeping attributes so they
        # don't leak into the pickled instance dict.
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # Derive the remaining mapping API from DictMixin (built on the
    # ordered __iter__ above, so all of these respect insertion order).
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Alternate constructor: every key maps to the same value."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Comparison against another OrderedDict is order-sensitive;
        # against a plain dict it is order-insensitive.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
| apache-2.0 |
def indent(string, level=1, lstrip_first=False):
    """Multiline string indent.

    Indent each line of the provided string by the specified level.

    Args:
        string: The string to indent, possibly containing linebreaks.
        level: The level to indent (``level * '    '``). Defaults to 1.
        lstrip_first: If this is `True`, then the first line is not indented.
            Defaults to `False`.

    Returns:
        Indented string. Note: a trailing newline in the input is dropped
        by ``splitlines``.
    """
    # Prefix every line with level copies of the indent unit.
    out = '\n'.join((level * '    ') + i for i in string.splitlines())
    if lstrip_first:
        return out.lstrip()
    return out
def is_namedtuple(x):
    """Check whether *x* looks like a namedtuple instance.

    Namedtuples are ordinary tuples that additionally expose a
    ``_fields`` attribute, which is what this predicate tests for.
    """
    if not isinstance(x, tuple):
        return False
    return hasattr(x, '_fields')
def print_ast(tree, level=0, inline=False):
    """Pretty-print an abstract syntax tree to stdout.

    The tree may be built from namedtuples, plain tuples/lists and
    arbitrary leaf objects; containers are rendered recursively with one
    extra indent level per nesting depth.

    Args:
        tree: The AST node to print.
        level: Current indent depth (used internally by the recursion).
        inline: When `True` the first line is not indented.

    Returns:
        Nothing. The AST is printed directly to stdout.
    """
    if is_namedtuple(tree):
        # Named nodes print as  NodeName( field=<child> ... )
        print(indent('{0.__class__.__name__}('.format(tree), level, inline))
        for field, child in tree._asdict().items():
            print(indent(field + '=', level + 1), end='')
            print_ast(child, level + 1, True)
        print(indent(')', level))
    elif isinstance(tree, (tuple, list)):
        if isinstance(tree, tuple):
            open_brace, close_brace = '(', ')'
        else:
            open_brace, close_brace = '[', ']'
        if not tree:
            # Empty containers are printed on a single unindented line.
            print(open_brace + close_brace)
        else:
            print(indent(open_brace, level, inline))
            for child in tree:
                print_ast(child, level + 1)
            print(indent(close_brace, level))
    else:
        # Leaf node: fall back to its repr.
        print(indent(repr(tree), level, inline))
| mit |
RevelSystems/django | django/templatetags/future.py | 40 | 2034 | import warnings
from django.template import Library, defaulttags
from django.utils.deprecation import RemovedInDjango20Warning
register = Library()
@register.tag
def cycle(parser, token):
    """
    This is the future version of `cycle` with auto-escaping.

    The deprecation is now complete and this version is no different
    from the non-future version so this is deprecated.

    By default all strings are escaped.

    If you want to disable auto-escaping of variables you can use::

        {% autoescape off %}
            {% cycle var1 var2 var3 as somecycle %}
        {% autoescape %}

    Or if only some variables should be escaped, you can use::

        {% cycle var1 var2|safe var3|safe as somecycle %}
    """
    # Warn on every template-compile of the tag; actual parsing is
    # delegated unchanged to the built-in implementation.
    warnings.warn(
        "Loading the `cycle` tag from the `future` library is deprecated and "
        "will be removed in Django 2.0. Use the default `cycle` tag instead.",
        RemovedInDjango20Warning)
    return defaulttags.cycle(parser, token)
@register.tag
def firstof(parser, token):
    """
    This is the future version of `firstof` with auto-escaping.

    The deprecation is now complete and this version is no different
    from the non-future version so this is deprecated.

    This is equivalent to::

        {% if var1 %}
            {{ var1 }}
        {% elif var2 %}
            {{ var2 }}
        {% elif var3 %}
            {{ var3 }}
        {% endif %}

    If you want to disable auto-escaping of variables you can use::

        {% autoescape off %}
            {% firstof var1 var2 var3 "<strong>fallback value</strong>" %}
        {% autoescape %}

    Or if only some variables should be escaped, you can use::

        {% firstof var1 var2|safe var3 "<strong>fallback value</strong>"|safe %}
    """
    # Warn on every template-compile of the tag; actual parsing is
    # delegated unchanged to the built-in implementation.
    warnings.warn(
        "Loading the `firstof` tag from the `future` library is deprecated and "
        "will be removed in Django 2.0. Use the default `firstof` tag instead.",
        RemovedInDjango20Warning)
    return defaulttags.firstof(parser, token)
| bsd-3-clause |
jvoegele/picard | picard/ui/infostatus.py | 5 | 2770 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QIcon
from picard.util import icontheme
from picard.ui.ui_infostatus import Ui_InfoStatus
class InfoStatus(QtGui.QWidget, Ui_InfoStatus):
    """Status-bar widget showing file/album counters and pending activity.

    The four icon/value label pairs (label1..label4 / val1..val4 from the
    generated Ui_InfoStatus) display: loaded files, albums, pending files
    and pending network requests.
    """

    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        Ui_InfoStatus.__init__(self)
        self.setupUi(self)
        # Fixed pixmap size used for all four status icons.
        self._size = QtCore.QSize(16, 16)
        self._create_icons()
        self._init_labels()

    def _init_labels(self):
        """Assign the initial pixmaps to the four icon labels."""
        size = self._size
        self.label1.setPixmap(self.icon_file.pixmap(size))
        self.label2.setPixmap(self.icon_cd.pixmap(size))
        self.label3.setPixmap(self.icon_file_pending.pixmap(size))
        # Download icon starts disabled: no pending requests yet.
        self.label4.setPixmap(self.icon_download.pixmap(size, QIcon.Disabled))
        self._init_tooltips()

    def _create_icons(self):
        """Load the status icons from the icon theme and resources."""
        self.icon_cd = icontheme.lookup('media-optical')
        self.icon_file = QtGui.QIcon(":/images/file.png")
        self.icon_file_pending = QtGui.QIcon(":/images/file-pending.png")
        self.icon_download = QtGui.QIcon(":/images/16x16/action-go-down-16.png")

    def _init_tooltips(self):
        """Set translated tooltips on both the icon and value labels."""
        t1 = _("Files")
        t2 = _("Albums")
        t3 = _("Pending files")
        t4 = _("Pending requests")
        self.val1.setToolTip(t1)
        self.label1.setToolTip(t1)
        self.val2.setToolTip(t2)
        self.label2.setToolTip(t2)
        self.val3.setToolTip(t3)
        self.label3.setToolTip(t3)
        self.val4.setToolTip(t4)
        self.label4.setToolTip(t4)

    def setFiles(self, num):
        """Update the loaded-files counter."""
        self.val1.setText(unicode(num))

    def setAlbums(self, num):
        """Update the albums counter."""
        self.val2.setText(unicode(num))

    def setPendingFiles(self, num):
        """Update the pending-files counter."""
        self.val3.setText(unicode(num))

    def setPendingRequests(self, num):
        """Update the pending-requests counter and grey out the icon at 0."""
        if num <= 0:
            enabled = QIcon.Disabled
        else:
            enabled = QIcon.Normal
        self.label4.setPixmap(self.icon_download.pixmap(self._size, enabled))
        self.val4.setText(unicode(num))
| gpl-2.0 |
Yong-Lee/django | django/core/management/commands/diffsettings.py | 479 | 1565 | from django.core.management.base import BaseCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    """Convert a module namespace to a plain dictionary.

    Each attribute value is stored as its ``repr`` string; names for which
    *omittable* returns True (by default anything starting with an
    underscore) are skipped.
    """
    converted = {}
    for name, value in vars(module).items():
        if omittable(name):
            continue
        converted[name] = repr(value)
    return converted
class Command(BaseCommand):
    help = """Displays differences between the current settings.py and Django's
    default settings. Settings that don't appear in the defaults are
    followed by "###"."""

    # No database or app-registry access is needed to diff settings.
    requires_system_checks = False

    def add_arguments(self, parser):
        parser.add_argument('--all', action='store_true', dest='all', default=False,
            help='Display all settings, regardless of their value. '
            'Default values are prefixed by "###".')

    def handle(self, **options):
        # Inspired by Postfix's "postconf -n".
        from django.conf import settings, global_settings

        # Settings are imported lazily; force them to load before diffing.
        settings._setup()

        user_settings = module_to_dict(settings._wrapped)
        default_settings = module_to_dict(global_settings)

        lines = []
        for key in sorted(user_settings):
            user_value = user_settings[key]
            if key not in default_settings:
                # Setting with no counterpart in the defaults.
                lines.append("%s = %s ###" % (key, user_value))
            elif user_value != default_settings[key]:
                # Setting overridden by the user.
                lines.append("%s = %s" % (key, user_value))
            elif options['all']:
                # Unchanged default, shown only with --all.
                lines.append("### %s = %s" % (key, user_value))
        return '\n'.join(lines)
| bsd-3-clause |
Frenzie/youtube-dl | youtube_dl/extractor/facebook.py | 3 | 7506 | from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
limit_length,
urlencode_postdata,
get_element_by_id,
clean_html,
)
class FacebookIE(InfoExtractor):
    """Information extractor for facebook.com video pages.

    Handles the various video URL shapes (video.php, photo.php, embeds and
    /<user>/videos/ pages) and can log in first when credentials are
    configured (see ``_NETRC_MACHINE``).
    """
    _VALID_URL = r'''(?x)
        https?://(?:\w+\.)?facebook\.com/
        (?:[^#]*?\#!/)?
        (?:
            (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
            (?:v|video_id)=|
            [^/]+/videos/(?:[^/]+/)?
        )
        (?P<id>[0-9]+)
        (?:.*)'''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with the configured credentials, if any.

        Best-effort: failures are reported as downloader warnings rather
        than aborting, since many videos are publicly accessible.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            return

        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
        # Force an English-locale page so the regexes below match.
        login_page_req.add_header('Cookie', 'locale=en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        # Hidden anti-CSRF form tokens required by the login endpoint.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                note='Logging in', errnote='unable to fetch login page')
            # If the login form is still present, authentication failed.
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a security "checkpoint" page; submit
            # the confirmation form without saving the browser.
            check_form = {
                'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
                'h': self._search_regex(
                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                'name_action_selected': 'dont_save',
            }
            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning('unable to log in: %s' % compat_str(err))
            return

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        """Extract formats, title and uploader for the video at *url*."""
        video_id = self._match_id(url)
        # Normalize every supported URL shape to the canonical video page.
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        # The player parameters are embedded in inline JS between these
        # two literal markers.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            # No player data: check for an interstitial explaining why the
            # video is unavailable, and surface that message to the user.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        params_raw = compat_urllib_parse_unquote(data['params'])
        params = json.loads(params_raw)

        formats = []
        for format_id, f in params['video_data'].items():
            if not f or not isinstance(f, list):
                continue
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            # De-prioritize the rate-limited progressive URLs.
                            'preference': -10 if format_id == 'progressive' else 0,
                        })
        if not formats:
            raise ExtractorError('Cannot find video formats')

        # Title fallbacks: page header, then photo caption, then a
        # synthetic "Facebook video #<id>" title.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', fatal=False)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.