repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
xieshenglin/shadowsocks | shadowsocks/common.py | 1 | 11907 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
def compat_ord(s):
    """ord() that also accepts an int (indexing py3 bytes yields ints)."""
    return s if type(s) == int else _ord(s)
def compat_chr(d):
    """chr() that yields a single byte on Python 3 (a str char on Python 2)."""
    return _chr(d) if bytes == str else bytes([d])
# Keep references to the real builtins before shadowing them below.
_ord = ord
_chr = chr
# Deliberately shadow the builtins module-wide with the 2/3-compatible
# versions, so the rest of this file can call ord()/chr() on either bytes
# elements (ints on py3, 1-char strs on py2) and always get the same result.
ord = compat_ord
chr = compat_chr
# Alias used for connection logging throughout the project.
connect_log = logging.debug
def to_bytes(s):
    """Encode native text to UTF-8 bytes on Python 3; pass through otherwise."""
    is_py3 = bytes != str
    if is_py3 and type(s) == str:
        return s.encode('utf-8')
    return s
def to_str(s):
    """Decode UTF-8 bytes to native text on Python 3; pass through otherwise."""
    needs_decode = bytes != str and type(s) == bytes
    return s.decode('utf-8') if needs_decode else s
def int32(x):
    """Reinterpret an integer as a signed 32-bit value (two's complement)."""
    # Masking in-range values is a no-op, so normalize unconditionally.
    x &= 0xFFFFFFFF
    if x <= 0x7FFFFFFF:
        return x
    # Top bit set: map the unsigned value back into the negative range.
    magnitude = int(0x100000000 - x)
    if magnitude < 0x80000000:
        return -magnitude
    return -2147483648
def inet_ntop(family, ipstr):
    """Convert a packed binary address to its text form, returned as bytes.

    Pure-Python fallback installed by patch_socket() on platforms whose
    socket module lacks inet_ntop.  NOTE: falls off the end (returns None)
    for any family other than AF_INET/AF_INET6.
    """
    if family == socket.AF_INET:
        return to_bytes(socket.inet_ntoa(ipstr))
    elif family == socket.AF_INET6:
        import re
        # Pair the 16 bytes into 8 hex groups; `ord` here is compat_ord, so
        # this works whether indexing yields ints (py3) or chars (py2).
        v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
                          for i, j in zip(ipstr[::2], ipstr[1::2]))
        # Collapse the first run of empty groups into the '::' shorthand.
        v6addr = re.sub('::+', '::', v6addr, count=1)
        return to_bytes(v6addr)
def inet_pton(family, addr):
    """Convert a text address to its packed binary form.

    Pure-Python fallback installed by patch_socket() on platforms whose
    socket module lacks inet_pton.  Raises RuntimeError for unknown families.
    """
    addr = to_str(addr)
    if family == socket.AF_INET:
        return socket.inet_aton(addr)
    elif family == socket.AF_INET6:
        if '.' in addr:  # a v4 addr embedded in v6 form, e.g. ::ffff:1.2.3.4
            v4addr = addr[addr.rindex(':') + 1:]
            v4addr = socket.inet_aton(v4addr)
            v4addr = ['%02X' % ord(x) for x in v4addr]
            v4addr.insert(2, ':')
            newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
            # Recurse with the trailing dotted quad rewritten as hex groups.
            return inet_pton(family, newaddr)
        dbyts = [0] * 8  # 8 groups
        grps = addr.split(':')
        for i, v in enumerate(grps):
            if v:
                dbyts[i] = int(v, 16)
            else:
                # Hit the '::' gap: fill the remaining groups from the right,
                # then stop scanning.
                for j, w in enumerate(grps[::-1]):
                    if w:
                        dbyts[7 - j] = int(w, 16)
                    else:
                        break
                break
        # `chr` is compat_chr here, so each group packs into two bytes.
        return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
    else:
        raise RuntimeError("What family?")
def is_ip(address):
    """Return the address family of a textual IP, or False if not an IP.

    Accepts str or UTF-8 bytes; returns socket.AF_INET / socket.AF_INET6.
    """
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            candidate = address if type(address) == str \
                else address.decode('utf8')
            inet_pton(family, candidate)
        except (TypeError, ValueError, OSError, IOError):
            continue
        return family
    return False
def patch_socket():
    """Install the pure-Python inet_pton/inet_ntop fallbacks on platforms
    whose socket module does not provide them natively."""
    if not hasattr(socket, 'inet_pton'):
        socket.inet_pton = inet_pton
    if not hasattr(socket, 'inet_ntop'):
        socket.inet_ntop = inet_ntop


# Patch at import time so the rest of the module can rely on socket.inet_*.
patch_socket()
# SOCKS5-style address-type codes used in the request header
# (see parse_header / pack_addr below).
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
    """Pack a host/IP into a SOCKS5-style address blob: type byte + payload."""
    address_str = to_str(address)
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            r = socket.inet_pton(family, address_str)
            if family == socket.AF_INET6:
                return b'\x04' + r
            else:
                return b'\x01' + r
        except (TypeError, ValueError, OSError, IOError):
            pass
    # Not an IP literal: encode as a hostname (type 3, length-prefixed).
    if len(address) > 255:
        address = address[:255]  # TODO
    # `chr` is compat_chr and returns bytes, so this concatenation is valid.
    return b'\x03' + chr(len(address)) + address
def pre_parse_header(data):
    """Strip the obfuscation wrapper (if any) from a decrypted request.

    The first byte selects the wrapper format (0x80/0x81/0x82/0x88 variants
    observed here); returns the remaining payload, or None when the buffer
    is too short or fails its CRC check.
    """
    if not data:
        return None
    datatype = ord(data[0])
    if datatype == 0x80:
        if len(data) <= 2:
            return None
        # One random-padding length byte followed by that much padding.
        rand_data_size = ord(data[1])
        if rand_data_size + 2 >= len(data):
            logging.warn('header too short, maybe wrong password or '
                         'encryption method')
            return None
        data = data[rand_data_size + 2:]
    elif datatype == 0x81:
        # No padding: just drop the type byte.
        data = data[1:]
    elif datatype == 0x82:
        if len(data) <= 3:
            return None
        # Like 0x80 but with a 16-bit big-endian padding length.
        rand_data_size = struct.unpack('>H', data[1:3])[0]
        if rand_data_size + 3 >= len(data):
            logging.warn('header too short, maybe wrong password or '
                         'encryption method')
            return None
        data = data[rand_data_size + 3:]
    elif datatype == 0x88 or (~datatype & 0xff) == 0x88:
        if len(data) <= 7 + 7:
            return None
        data_size = struct.unpack('>H', data[1:3])[0]
        ogn_data = data
        data = data[:data_size]
        # CRC over the whole chunk (including its trailing CRC field)
        # yields the fixed residue 0xffffffff when intact.
        crc = binascii.crc32(data) & 0xffffffff
        if crc != 0xffffffff:
            logging.warn('uncorrect CRC32, maybe wrong password or '
                         'encryption method')
            return None
        start_pos = 3 + ord(data[3])
        # Drop the header/padding and the 4-byte CRC tail.
        data = data[start_pos:-4]
        if data_size < len(ogn_data):
            # Re-append any bytes that followed this framed chunk.
            data += ogn_data[data_size:]
    return data
def parse_header(data):
    """Parse a SOCKS5-style destination header.

    Returns (connecttype, dest_addr, dest_port, header_length) on success,
    or None when the header is malformed or truncated.
    """
    addrtype = ord(data[0])
    dest_addr = None
    dest_port = None
    header_length = 0
    # Bit 0x8 of the type byte is a flag; it is reported as `connecttype`
    # and masked off before matching the address type below.
    connecttype = (addrtype & 0x8) and 1 or 0
    addrtype &= ~0x8
    if addrtype == ADDRTYPE_IPV4:
        # 1 type byte + 4 address bytes + 2 port bytes.
        if len(data) >= 7:
            dest_addr = socket.inet_ntoa(data[1:5])
            dest_port = struct.unpack('>H', data[5:7])[0]
            header_length = 7
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_HOST:
        # 1 type byte + 1 length byte + hostname + 2 port bytes.
        if len(data) > 2:
            addrlen = ord(data[1])
            if len(data) >= 4 + addrlen:
                dest_addr = data[2:2 + addrlen]
                dest_port = struct.unpack('>H', data[2 + addrlen:4 +
                                          addrlen])[0]
                header_length = 4 + addrlen
            else:
                logging.warn('header is too short')
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_IPV6:
        # 1 type byte + 16 address bytes + 2 port bytes.
        if len(data) >= 19:
            dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
            dest_port = struct.unpack('>H', data[17:19])[0]
            header_length = 19
        else:
            logging.warn('header is too short')
    else:
        logging.warn('unsupported addrtype %d, maybe wrong password or '
                     'encryption method' % addrtype)
    if dest_addr is None:
        return None
    return connecttype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
    """A collection of CIDR networks supporting `addr in network` tests.

    Networks are stored as (shifted_prefix, host_bit_count) tuples so that
    membership testing is a shift-and-compare.
    """

    # Address length in bits per family; False maps to 0 for non-IP input.
    ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}

    def __init__(self, addrs):
        """`addrs` is a comma-separated string or an iterable of CIDR items."""
        self.addrs_str = addrs
        self._network_list_v4 = []
        self._network_list_v6 = []
        if type(addrs) == str:
            addrs = addrs.split(',')
        list(map(self.add_network, addrs))

    def add_network(self, addr):
        """Parse one CIDR item (e.g. '10.0.0.0/8' or a bare IP) and store it.

        Raises Exception for anything that is not valid CIDR notation.
        """
        # BUG FIX: the original compared literals with identity
        # (`addr is ""`, `len(block) is 1`, `ip is not 0`), which relies on
        # CPython interning/caching and is not guaranteed by the language;
        # use equality comparisons instead.
        if addr == "":
            return
        block = addr.split('/')
        addr_family = is_ip(block[0])
        addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family is socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family is socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
            ip = (hi << 64) | lo
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            # No explicit prefix size: infer it by stripping trailing zero
            # bits, and warn about the implicit interpretation.
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warn("You did't specify CIDR routing prefix size for %s, "
                         "implicit treated as %s/%d" % (addr, addr, addr_len))
        elif block[1].isdigit() and int(block[1]) <= addr_len:
            prefix_size = addr_len - int(block[1])
            ip >>= prefix_size
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family is socket.AF_INET:
            self._network_list_v4.append((ip, prefix_size))
        else:
            self._network_list_v6.append((ip, prefix_size))

    def __contains__(self, addr):
        """Return True when textual IP `addr` falls inside any stored network."""
        addr_family = is_ip(addr)
        if addr_family is socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(addr))
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v4))
        elif addr_family is socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
            ip = (hi << 64) | lo
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v6))
        else:
            return False

    def __cmp__(self, other):
        # NOTE: Python 2 only -- the `cmp` builtin no longer exists on py3.
        return cmp(self.addrs_str, other.addrs_str)
class PortRange(object):
    """Parse a spec like "80,443,8000-8080" into a set of port numbers."""

    def __init__(self, range_str):
        self.range_str = to_str(range_str)
        self.range = set()
        for item in to_str(range_str).split(','):
            try:
                bounds = item.split('-')
                if len(bounds) == 1:
                    # A single port (ignore empty items from stray commas).
                    if item:
                        self.range.add(int(item))
                elif len(bounds) == 2:
                    # An inclusive range, clamped to the valid port space.
                    low = max(int(bounds[0]), 0)
                    high = min(int(bounds[1]), 65535)
                    self.range.update(range(low, high + 1))
            except Exception as e:
                logging.error(e)

    def __contains__(self, val):
        return val in self.range

    def __cmp__(self, other):
        # NOTE: Python 2 only -- the `cmp` builtin no longer exists on py3.
        return cmp(self.range_str, other.range_str)
def test_inet_conv():
    """Round-trip the pure-Python inet_pton/inet_ntop implementations."""
    ipv4 = b'8.8.4.4'
    b = inet_pton(socket.AF_INET, ipv4)
    assert inet_ntop(socket.AF_INET, b) == ipv4
    ipv6 = b'2404:6800:4005:805::1011'
    b = inet_pton(socket.AF_INET6, ipv6)
    assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
    """Exercise parse_header on hostname, IPv4 and IPv6 headers."""
    assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
        (0, b'www.google.com', 80, 18)
    assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
        (0, b'8.8.8.8', 53, 7)
    assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
                         b'\x00\x10\x11\x00\x50')) == \
        (0, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
    """Exercise pack_addr on IPv4, IPv6 and hostname inputs."""
    assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
    assert pack_addr(b'2404:6800:4005:805::1011') == \
        b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
    assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
    """Check IPNetwork membership, including implicit-prefix handling."""
    ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
    assert '127.0.0.1' in ip_network
    assert '127.0.1.1' not in ip_network
    # NOTE(review): ':ff:ffff' (single leading colon) looks like a typo for
    # '::ff:ffff' -- confirm against the upstream test suite.
    assert ':ff:ffff' in ip_network
    assert '::ffff:1' not in ip_network
    assert '::1' in ip_network
    assert '::2' not in ip_network
    assert '192.168.1.1' in ip_network
    assert '192.168.1.2' not in ip_network
    assert '192.0.2.1' in ip_network
    assert '192.0.3.1' in ip_network  # 192.0.2.0 is treated as 192.0.2.0/23
    assert 'www.google.com' not in ip_network
if __name__ == '__main__':
    # Ad-hoc self-test entry point: `python common.py` runs all checks.
    test_inet_conv()
    test_parse_header()
    test_pack_header()
    test_ip_network()
| apache-2.0 |
lamby/pkg-fabric | tests/server.py | 34 | 16145 | from __future__ import with_statement
import copy
import itertools
import os
import re
import socket
import stat
import sys
import threading
import time
import types
from StringIO import StringIO
from functools import wraps
from Python26SocketServer import BaseRequestHandler, ThreadingMixIn, TCPServer
from fabric.operations import _sudo_prefix
from fabric.api import env, hide
from fabric.thread_handling import ThreadHandler
from fabric.network import disconnect_all, ssh
from fake_filesystem import FakeFilesystem, FakeFile
#
# Debugging
#
import logging
# Log everything to a scratch file for post-mortem debugging of test runs.
logging.basicConfig(filename='/tmp/fab.log', level=logging.DEBUG)
logger = logging.getLogger('server.py')


#
# Constants
#

HOST = '127.0.0.1'
PORT = 2200
USER = 'username'
HOME = '/'

# Canned command -> reply table served by the fake SSH server.  A value may
# be a plain string (stdout only) or a list of up to
# [stdout, stderr, status, sleep] -- see SSHHandler.response() below.
RESPONSES = {
    "ls /simple": "some output",
    "ls /": """AUTHORS
FAQ
Fabric.egg-info
INSTALL
LICENSE
MANIFEST
README
build
docs
fabfile.py
fabfile.pyc
fabric
requirements.txt
setup.py
tests""",
    "both_streams": [
        "stdout",
        "stderr"
    ],
}

# In-memory filesystem exposed over the fake SFTP subsystem.
FILES = FakeFilesystem({
    '/file.txt': 'contents',
    '/file2.txt': 'contents2',
    '/folder/file3.txt': 'contents3',
    '/empty_folder': None,
    '/tree/file1.txt': 'x',
    '/tree/file2.txt': 'y',
    '/tree/subfolder/file3.txt': 'z',
    '/etc/apache2/apache2.conf': 'Include other.conf',
    HOME: None  # So $HOME is a directory
})

# username -> password pairs accepted by the fake server.
PASSWORDS = {
    'root': 'root',
    USER: 'password'
}


def _local_file(filename):
    """Return the path of *filename* relative to this test module."""
    return os.path.join(os.path.dirname(__file__), filename)


SERVER_PRIVKEY = _local_file('private.key')
CLIENT_PUBKEY = _local_file('client.key.pub')
CLIENT_PRIVKEY = _local_file('client.key')
CLIENT_PRIVKEY_PASSPHRASE = "passphrase"
def _equalize(lists, fillval=None):
"""
Pad all given list items in ``lists`` to be the same length.
"""
lists = map(list, lists)
upper = max(len(x) for x in lists)
for lst in lists:
diff = upper - len(lst)
if diff:
lst.extend([fillval] * diff)
return lists
class TestServer(ssh.ServerInterface):
    """
    Test server implementing the 'ssh' lib's server interface parent class.

    Mostly just handles the bare minimum necessary to handle SSH-level things
    such as honoring authentication types and exec/shell/etc requests.

    The bulk of the actual server side logic is handled in the
    ``serve_responses`` function and its ``SSHHandler`` class.
    """
    def __init__(self, passwords, home, pubkeys, files):
        # Set once an exec/shell request arrives (see the check_* methods).
        self.event = threading.Event()
        self.passwords = passwords
        self.pubkeys = pubkeys
        self.files = FakeFilesystem(files)
        self.home = home
        self.command = None

    def check_channel_request(self, kind, chanid):
        # Only plain session channels are allowed.
        if kind == 'session':
            return ssh.OPEN_SUCCEEDED
        return ssh.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

    def check_channel_exec_request(self, channel, command):
        # Stash the requested command for SSHHandler to pick up.
        self.command = command
        self.event.set()
        return True

    def check_channel_pty_request(self, *args):
        return True

    def check_channel_shell_request(self, channel):
        self.event.set()
        return True

    def check_auth_password(self, username, password):
        self.username = username
        passed = self.passwords.get(username) == password
        return ssh.AUTH_SUCCESSFUL if passed else ssh.AUTH_FAILED

    def check_auth_publickey(self, username, key):
        self.username = username
        # Any key is accepted whenever pubkey auth is enabled at all.
        return ssh.AUTH_SUCCESSFUL if self.pubkeys else ssh.AUTH_FAILED

    def get_allowed_auths(self, username):
        return 'password,publickey'
class SSHServer(ThreadingMixIn, TCPServer):
    """
    Threading TCPServer subclass.
    """
    def _socket_info(self, addr_tup):
        """
        Clone of the very top of Paramiko 1.7.6 SSHClient.connect().

        We must use this in order to make sure that our address family matches
        up with the client side (which we cannot control, and which varies
        depending on individual computers and their network settings).
        """
        hostname, port = addr_tup
        addr_info = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC,
                                       socket.SOCK_STREAM)
        for (family, socktype, proto, canonname, sockaddr) in addr_info:
            if socktype == socket.SOCK_STREAM:
                af = family
                addr = sockaddr
                break
        else:
            # some OS like AIX don't indicate SOCK_STREAM support, so just
            # guess. :(
            # NOTE(review): getaddrinfo returns a *list* of 5-tuples; this
            # unpacking looks like it is missing a trailing [0] -- confirm.
            af, _, _, _, addr = socket.getaddrinfo(hostname, port,
                                                   socket.AF_UNSPEC,
                                                   socket.SOCK_STREAM)
        return af, addr

    def __init__(
        self, server_address, RequestHandlerClass, bind_and_activate=True
    ):
        # Prevent "address already in use" errors when running tests 2x in a
        # row.
        self.allow_reuse_address = True
        # Handle network family/host addr (see docstring for _socket_info)
        family, addr = self._socket_info(server_address)
        self.address_family = family
        TCPServer.__init__(self, addr, RequestHandlerClass,
                           bind_and_activate)
class FakeSFTPHandle(ssh.SFTPHandle):
    """
    Extremely basic way to get SFTPHandle working with our fake setup.
    """
    def chattr(self, attr):
        # Persist attribute changes straight onto the fake file object.
        self.readfile.attributes = attr
        return ssh.SFTP_OK

    def stat(self):
        return self.readfile.attributes
class PrependList(list):
    """List subclass exposing an explicit insert-at-front operation."""

    def prepend(self, val):
        # Place the new value before every existing element.
        self.insert(0, val)
def expand(path):
    """
    '/foo/bar/biz' => ('/', 'foo', 'bar', 'biz')
    'relative/path' => ('relative', 'path')

    Absolute paths start with os.path.sep; relative ones start with ''.
    """
    # Base case: nothing to split.
    if path in ('', os.path.sep):
        return [path]
    collected = []
    directory, filename = os.path.split(path)
    while directory and directory != os.path.sep:
        collected.append(filename)
        directory, filename = os.path.split(directory)
    collected.append(filename)
    # Mark absolute vs relative paths with a leading sep or empty string.
    collected.append(directory if directory == os.path.sep else '')
    result = PrependList()
    result.extend(reversed(collected))
    return result
def contains(folder, path):
    """
    contains(('a', 'b', 'c'), ('a', 'b')) => True
    contains(('a', 'b', 'c'), ('f',)) => False

    True when `path` is a strict ancestor (proper prefix) of `folder`.
    """
    if len(path) >= len(folder):
        return False
    return folder[:len(path)] == path
def missing_folders(paths):
    """
    missing_folders(['a/b/c']) => ['a', 'a/b', 'a/b/c']

    Return the ancestor folders implied by `paths` but not listed in it.
    """
    discovered = []
    seen = set(paths)
    for path in paths:
        segments = expand(path)
        # Walk the prefixes from longest to shortest, recording new ones.
        for depth in range(len(segments), 0, -1):
            folder = os.path.join(*segments[:depth])
            if folder and folder not in seen:
                seen.add(folder)
                discovered.append(folder)
    return discovered
def canonicalize(path, home):
    """Resolve `path` against `home` unless it is already absolute."""
    if os.path.isabs(path):
        return path
    return os.path.normpath(os.path.join(home, path))
class FakeSFTPServer(ssh.SFTPServerInterface):
    """SFTP server interface backed by the in-memory FakeFilesystem."""

    def __init__(self, server, *args, **kwargs):
        self.server = server
        files = self.server.files
        # Expand such that omitted, implied folders get added explicitly
        for folder in missing_folders(files.keys()):
            files[folder] = None
        self.files = files

    def canonicalize(self, path):
        """
        Make non-absolute paths relative to $HOME.
        """
        return canonicalize(path, self.server.home)

    def list_folder(self, path):
        path = self.files.normalize(path)
        expanded_files = map(expand, self.files)
        expanded_path = expand(path)
        # Keep only entries living somewhere under `path`.
        candidates = [x for x in expanded_files if contains(x, expanded_path)]
        children = []
        for candidate in candidates:
            # Truncate to the immediate-child level and de-duplicate.
            cut = candidate[:len(expanded_path) + 1]
            if cut not in children:
                children.append(cut)
        results = [self.stat(os.path.join(*x)) for x in children]
        bad = not results or any(x == ssh.SFTP_NO_SUCH_FILE for x in results)
        return ssh.SFTP_NO_SUCH_FILE if bad else results

    def open(self, path, flags, attr):
        path = self.files.normalize(path)
        try:
            fobj = self.files[path]
        except KeyError:
            if flags & os.O_WRONLY:
                # Only allow writes to files in existing directories.
                if os.path.dirname(path) not in self.files:
                    return ssh.SFTP_NO_SUCH_FILE
                self.files[path] = fobj = FakeFile("", path)
            # No write flag means a read, which means they tried to read a
            # nonexistent file.
            else:
                return ssh.SFTP_NO_SUCH_FILE
        f = FakeSFTPHandle()
        f.readfile = f.writefile = fobj
        return f

    def stat(self, path):
        path = self.files.normalize(path)
        try:
            fobj = self.files[path]
        except KeyError:
            return ssh.SFTP_NO_SUCH_FILE
        return fobj.attributes

    # Don't care about links right now
    lstat = stat

    def chattr(self, path, attr):
        path = self.files.normalize(path)
        if path not in self.files:
            return ssh.SFTP_NO_SUCH_FILE
        # Attempt to gracefully update instead of overwrite, since things like
        # chmod will call us with an SFTPAttributes object that only exhibits
        # e.g. st_mode, and we don't want to lose our filename or size...
        for which in "size uid gid mode atime mtime".split():
            attname = "st_" + which
            incoming = getattr(attr, attname)
            if incoming is not None:
                setattr(self.files[path].attributes, attname, incoming)
        return ssh.SFTP_OK

    def mkdir(self, path, attr):
        self.files[path] = None
        return ssh.SFTP_OK
def serve_responses(responses, files, passwords, home, pubkeys, port):
    """
    Return a threading TCP based SocketServer listening on ``port``.

    Used as a fake SSH server which will respond to commands given in
    ``responses`` and allow connections for users listed in ``passwords``.

    ``home`` is used as the remote $HOME (mostly for SFTP purposes).

    ``pubkeys`` is a Boolean value determining whether the server will allow
    pubkey auth or not.
    """
    # Define handler class inline so it can access serve_responses' args
    class SSHHandler(BaseRequestHandler):
        def handle(self):
            try:
                self.init_transport()
                self.waiting_for_command = False
                while not self.server.all_done.isSet():
                    # Don't overwrite channel if we're waiting for a command.
                    if not self.waiting_for_command:
                        self.channel = self.transport.accept(1)
                        if not self.channel:
                            continue
                    self.ssh_server.event.wait(10)
                    if self.ssh_server.command:
                        self.command = self.ssh_server.command
                        # Set self.sudo_prompt, update self.command
                        self.split_sudo_prompt()
                        if self.command in responses:
                            self.stdout, self.stderr, self.status = \
                                self.response()
                            if self.sudo_prompt and not self.sudo_password():
                                self.channel.send(
                                    "sudo: 3 incorrect password attempts\n"
                                )
                                break
                            self.respond()
                        else:
                            self.channel.send_stderr(
                                "Sorry, I don't recognize that command.\n"
                            )
                            self.channel.send_exit_status(1)
                        # Close up shop
                        self.command = self.ssh_server.command = None
                        self.waiting_for_command = False
                        time.sleep(0.5)
                        self.channel.close()
                    else:
                        # If we're here, self.command was False or None,
                        # but we do have a valid Channel object. Thus we're
                        # waiting for the command to show up.
                        self.waiting_for_command = True
            finally:
                self.transport.close()

        def init_transport(self):
            """Stand up the paramiko Transport + our TestServer interface."""
            transport = ssh.Transport(self.request)
            transport.add_server_key(ssh.RSAKey(filename=SERVER_PRIVKEY))
            transport.set_subsystem_handler('sftp', ssh.SFTPServer,
                                            sftp_si=FakeSFTPServer)
            server = TestServer(passwords, home, pubkeys, files)
            transport.start_server(server=server)
            self.ssh_server = server
            self.transport = transport

        def split_sudo_prompt(self):
            """Split any leading sudo prefix off of the received command."""
            prefix = re.escape(_sudo_prefix(None, None).rstrip()) + ' +'
            result = re.findall(r'^(%s)?(.*)$' % prefix, self.command)[0]
            self.sudo_prompt, self.command = result

        def response(self):
            """Look up (stdout, stderr, status) for the current command.

            Table values may be a bare string or a list of up to
            [stdout, stderr, status, sleep].
            """
            result = responses[self.command]
            stderr = ""
            status = 0
            sleep = 0
            # Allow response to be a single string, or an iterable of parts.
            if isinstance(result, types.StringTypes):
                stdout = result
            else:
                size = len(result)
                if size == 1:
                    stdout = result[0]
                elif size == 2:
                    stdout, stderr = result
                elif size == 3:
                    stdout, stderr, status = result
                elif size == 4:
                    stdout, stderr, status, sleep = result
            stdout, stderr = _equalize((stdout, stderr))
            time.sleep(sleep)
            return stdout, stderr, status

        def sudo_password(self):
            """Prompt for and validate the sudo password over the channel."""
            # Give user 3 tries, as is typical
            passed = False
            for x in range(3):
                self.channel.send(env.sudo_prompt)
                password = self.channel.recv(65535).strip()
                # Spit back newline to fake the echo of user's
                # newline
                self.channel.send('\n')
                # Test password
                if password == passwords[self.ssh_server.username]:
                    passed = True
                    break
                # If here, password was bad.
                self.channel.send("Sorry, try again.\n")
            return passed

        def respond(self):
            """Interleave stdout/stderr chunks, then send the exit status."""
            for out, err in zip(self.stdout, self.stderr):
                if out is not None:
                    self.channel.send(out)
                if err is not None:
                    self.channel.send_stderr(err)
            self.channel.send_exit_status(self.status)

    return SSHServer((HOST, port), SSHHandler)
def server(
    responses=RESPONSES,
    files=FILES,
    passwords=PASSWORDS,
    home=HOME,
    pubkeys=False,
    port=PORT
):
    """
    Returns a decorator that runs an SSH server during function execution.

    Direct passthrough to ``serve_responses``.
    """
    def run_server(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # Start server
            _server = serve_responses(responses, files, passwords, home,
                                      pubkeys, port)
            _server.all_done = threading.Event()
            worker = ThreadHandler('server', _server.serve_forever)
            # Execute function
            try:
                return func(*args, **kwargs)
            finally:
                # Clean up client side connections
                with hide('status'):
                    disconnect_all()
                # Stop server
                _server.all_done.set()
                _server.shutdown()
                # Why this is not called in shutdown() is beyond me.
                _server.server_close()
                worker.thread.join()
                # Handle subthread exceptions
                e = worker.exception
                if e:
                    # NOTE: Python 2-only three-argument raise syntax,
                    # re-raising with the worker thread's traceback.
                    raise e[0], e[1], e[2]
        return inner
    return run_server
| bsd-2-clause |
kirmani/hlpr_cadence | third_party/vector_v1/vector_common/vector_ros/src/vector/vector_teleop.py | 2 | 12505 | """--------------------------------------------------------------------
COPYRIGHT 2016 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed SI Vector Platform is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file vector_teleop.py
\brief This module contains a class for teleoperating the vector
platform with a joystick controller
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
from utils import *
from system_defines import *
from vector_msgs.msg import *
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool,Float64
import rospy
import sys
import math
"""
mapping for controller order is dtz_request, powerdown_request, standby_request, tractor_request, balance_request, audio_request,
deadman_input, manual_ovvrd_input, twist_linear_x_input, twist_linear_y_input, twist_angular_z_input
"""
MAP_DTZ_IDX = 0
MAP_PWRDWN_IDX = 1
MAP_STANDBY_IDX = 2
MAP_TRACTOR_IDX = 3
MAP_BALANCE_IDX = 4
MAP_AUDIO_IDX = 5
MAP_REC_GOAL_IDX = 6
MAP_DEADMAN_IDX = 7
MAP_MAN_OVVRD_IDX = 8
NUMBER_OF_MOMENTARY_INPUTS = 9
MAP_TWIST_LIN_X_IDX = 0
MAP_TWIST_LIN_Y_IDX = 1
MAP_TWIST_ANG_Z_IDX = 2
NUMBER_OF_AXIS_INPUTS = 3
class VectorTeleop:
    """Joystick teleoperation node for the Stanley Innovation Vector base.

    Translates sensor_msgs/Joy input into configuration commands
    (/vector/gp_command), goal-recording triggers (/vector/record_pose) and
    slew-rate-limited velocity commands (/vector/teleop/cmd_vel and
    /vector/manual_override/cmd_vel).
    """

    def __init__(self):
        self.is_sim = rospy.get_param('~sim', False)

        if (False == self.is_sim):
            """
            Subscribe to the configuration message
            """
            self.config_updated = False
            rospy.Subscriber("/vector/feedback/active_configuration",
                             Configuration,
                             self._update_configuration_limits)

            # Wait up to 10s for the driver to publish its configuration.
            start_time = rospy.get_time()
            while ((rospy.get_time() - start_time) < 10.0) and \
                    (False == self.config_updated):
                rospy.sleep(0.05)
            if (False == self.config_updated):
                rospy.logerr("Timed out waiting for Vector feedback topics "
                             "make sure the driver is running")
                sys.exit(0)
                return
        else:
            # BUG FIX: both sim limits used to be assigned to the same
            # attribute (self.vel_limit_mps), so self.x_vel_limit_mps and
            # self.y_vel_limit_mps -- the attributes actually read in
            # _vector_teleop -- were never set in simulation mode.
            self.x_vel_limit_mps = rospy.get_param(
                '~sim_teleop_x_vel_limit_mps', 0.5)
            self.y_vel_limit_mps = rospy.get_param(
                '~sim_teleop_y_vel_limit_mps', 0.5)
            self.yaw_rate_limit_rps = rospy.get_param(
                '~sim_teleop_yaw_rate_limit_rps', 0.5)
            self.accel_lim = rospy.get_param('~sim_teleop_accel_lim', 0.5)
            self.yaw_accel_lim = rospy.get_param(
                '~sim_teleop_yaw_accel_lim', 1.0)

        default_ctrl_map = dict({
            'momentary': [[{'is_button': True, 'index': 4, 'set_val': 1}],
                          [{'is_button': True, 'index': 8, 'set_val': 1}],
                          [{'is_button': True, 'index': 1, 'set_val': 1}],
                          [{'is_button': True, 'index': 2, 'set_val': 1}],
                          [{'is_button': True, 'index': 0, 'set_val': 1}],
                          [{'is_button': False, 'index': 6,
                            'invert_axis': False, 'set_thresh': 0.9}],
                          [{'is_button': False, 'index': 7,
                            'invert_axis': True, 'set_thresh': 0.9}],
                          [{'is_button': False, 'index': 2,
                            'invert_axis': True, 'set_thresh': 0.9}],
                          [{'is_button': False, 'index': 5,
                            'invert_axis': True, 'set_thresh': 0.9}]],
            'axis_range': [{'index': 1, 'invert_axis': False},
                           {'index': 0, 'invert_axis': False},
                           {'index': 3, 'invert_axis': False}]})

        """
        Get the mapping for the various commands, defaults are xbox360 wireless
        """
        self.ctrl_map = rospy.get_param('~controller_mapping',
                                        default_ctrl_map)

        """
        Initialize the debounce logic states
        """
        self.db_cnt = [0] * NUMBER_OF_MOMENTARY_INPUTS
        self.button_state = [False] * NUMBER_OF_MOMENTARY_INPUTS
        self.axis_value = [0.0] * NUMBER_OF_AXIS_INPUTS

        self.send_cmd_none = False
        self.no_motion_commands = True
        self.last_motion_command_time = 0.0
        self.last_joy = rospy.get_time()
        # BUG FIX: previously never initialized; _vector_teleop read it on
        # the first joystick message, raising AttributeError.
        self.goalrecorded = False

        self.cfg_cmd = ConfigCmd()
        self.cfg_pub = rospy.Publisher('/vector/gp_command', ConfigCmd,
                                       queue_size=10)
        self.goalrecorder_pub = rospy.Publisher('/vector/record_pose', Bool,
                                                queue_size=10)

        self.motion_cmd = Twist()
        self.limited_cmd = Twist()
        self.motion_pub = rospy.Publisher('/vector/teleop/cmd_vel', Twist,
                                          queue_size=10)
        self.override_pub = rospy.Publisher("/vector/manual_override/cmd_vel",
                                            Twist, queue_size=10)
        rospy.Subscriber('/joy', Joy, self._vector_teleop)

    def _update_configuration_limits(self, config):
        """Cache the teleop velocity/acceleration limits from the driver."""
        self.x_vel_limit_mps = config.teleop_x_vel_limit_mps
        self.y_vel_limit_mps = config.teleop_y_vel_limit_mps
        self.yaw_rate_limit_rps = config.teleop_yaw_rate_limit_rps
        self.accel_lim = config.teleop_accel_limit_mps2
        self.yaw_accel_lim = config.teleop_yaw_accel_limit_rps2
        self.config_updated = True

    def _parse_joy_input(self, joyMessage):
        """Debounce momentary inputs and sample the mapped analog axes."""
        raw_button_states = [True] * NUMBER_OF_MOMENTARY_INPUTS
        self.button_state = [False] * NUMBER_OF_MOMENTARY_INPUTS

        for i in range(NUMBER_OF_MOMENTARY_INPUTS):
            # Each request may be bound to several buttons/axes; all must be
            # active for the request to register.
            inputs_for_req = self.ctrl_map['momentary'][i]
            for item in inputs_for_req:
                if item['is_button']:
                    if item['set_val'] == joyMessage.buttons[item['index']]:
                        raw_button_states[i] &= True
                    else:
                        raw_button_states[i] = False
                else:
                    temp = joyMessage.axes[item['index']]
                    if (item['invert_axis']):
                        temp *= -1.0
                    if (temp >= item['set_thresh']):
                        raw_button_states[i] &= True
                    else:
                        raw_button_states[i] = False

            # Require 10 consecutive active samples before latching True.
            if (True == raw_button_states[i]):
                self.db_cnt[i] += 1
                if (self.db_cnt[i] > 10):
                    self.db_cnt[i] = 10
                    self.button_state[i] = True
            else:
                self.button_state[i] = False
                self.db_cnt[i] = 0

        self.axis_value = [0.0] * NUMBER_OF_AXIS_INPUTS
        for i in range(NUMBER_OF_AXIS_INPUTS):
            axis_input_map = self.ctrl_map['axis_range'][i]
            temp = joyMessage.axes[axis_input_map['index']]
            if (axis_input_map['invert_axis']):
                temp *= -1.0
            self.axis_value[i] = temp

    def _vector_teleop(self, joyMessage):
        """Joy callback: publish config and velocity commands as mapped."""
        self._parse_joy_input(joyMessage)

        # Edge-trigger the goal recorder: publish once per button press.
        if self.button_state[MAP_REC_GOAL_IDX] == 1:
            if (False == self.goalrecorded):
                temp = Bool()
                temp.data = True
                self.goalrecorder_pub.publish(temp)
                self.goalrecorded = True
        else:
            self.goalrecorded = False

        if self.button_state[MAP_DTZ_IDX]:
            self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
            self.cfg_cmd.gp_param = DTZ_REQUEST
        elif self.button_state[MAP_PWRDWN_IDX]:
            self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
            # NOTE(review): this issues STANDBY_REQUEST for the powerdown
            # button, identical to the standby branch below -- confirm
            # whether a POWERDOWN request constant was intended.
            self.cfg_cmd.gp_param = STANDBY_REQUEST
        elif self.button_state[MAP_STANDBY_IDX]:
            self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
            self.cfg_cmd.gp_param = STANDBY_REQUEST
        elif self.button_state[MAP_TRACTOR_IDX]:
            self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
            self.cfg_cmd.gp_param = TRACTOR_REQUEST
        else:
            self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_NONE'
            self.cfg_cmd.gp_param = 0

        if ('GENERAL_PURPOSE_CMD_NONE' != self.cfg_cmd.gp_cmd):
            self.cfg_cmd.header.stamp = rospy.get_rostime()
            self.cfg_pub.publish(self.cfg_cmd)
            # NOTE(review): no-op expression; was this meant to increment
            # header.seq?
            self.cfg_cmd.header.seq
            self.send_cmd_none = True
        elif (True == self.send_cmd_none):
            # Send a single CMD_NONE after the button is released.
            self.cfg_cmd.header.stamp = rospy.get_rostime()
            self.cfg_pub.publish(self.cfg_cmd)
            self.cfg_cmd.header.seq
            self.send_cmd_none = False
        elif (False == self.send_cmd_none):
            if self.button_state[MAP_DEADMAN_IDX]:
                # Scale the stick deflections by the configured limits.
                self.motion_cmd.linear.x = \
                    (self.axis_value[MAP_TWIST_LIN_X_IDX] *
                     self.x_vel_limit_mps)
                self.motion_cmd.linear.y = \
                    (self.axis_value[MAP_TWIST_LIN_Y_IDX] *
                     self.y_vel_limit_mps)
                self.motion_cmd.angular.z = \
                    (self.axis_value[MAP_TWIST_ANG_Z_IDX] *
                     self.yaw_rate_limit_rps)
                self.last_motion_command_time = rospy.get_time()
            else:
                self.motion_cmd.linear.x = 0.0
                self.motion_cmd.linear.y = 0.0
                self.motion_cmd.angular.z = 0.0

            dt = rospy.get_time() - self.last_joy
            self.last_joy = rospy.get_time()
            if (dt >= 0.01):
                # Slew-rate limit each component toward the commanded value.
                self.limited_cmd.linear.x = slew_limit(
                    self.motion_cmd.linear.x,
                    self.limited_cmd.linear.x,
                    self.accel_lim, dt)
                self.limited_cmd.linear.y = slew_limit(
                    self.motion_cmd.linear.y,
                    self.limited_cmd.linear.y,
                    self.accel_lim, dt)
                self.limited_cmd.angular.z = slew_limit(
                    self.motion_cmd.angular.z,
                    self.limited_cmd.angular.z,
                    self.yaw_accel_lim, dt)

                # Stop publishing 2s after the last deadman-held command.
                if ((rospy.get_time() -
                     self.last_motion_command_time) < 2.0):
                    self.motion_pub.publish(self.limited_cmd)
                    if self.button_state[MAP_DEADMAN_IDX] and \
                            self.button_state[MAP_MAN_OVVRD_IDX]:
                        # Manual override bypasses the slew limiter.
                        self.override_pub.publish(self.motion_cmd)
| mit |
kevin-coder/tensorflow-fork | tensorflow/contrib/resampler/python/ops/resampler_ops_test.py | 23 | 10484 | # pylint: disable=g-bad-file-header
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for contrib.resampler.python.ops.resampler_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import resampler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _bilinearly_interpolate(data, x, y):
"""Performs bilinenar interpolation of grid data at user defined coordinates.
This interpolation function:
a) implicitly pads the input data with 0s.
b) returns 0 when sampling outside the (padded) image.
The effect is that the sampled signal smoothly goes to 0 outside the original
input domain, rather than producing a jump discontinuity at the image
boundaries.
Args:
data: numpy array of shape `[data_height, data_width]` containing data
samples assumed to be defined at the corresponding pixel coordinates.
x: numpy array of shape `[warp_height, warp_width]` containing x coordinates
at which interpolation will be performed.
y: numpy array of shape `[warp_height, warp_width]` containing y coordinates
at which interpolation will be performed.
Returns:
Numpy array of shape `[warp_height, warp_width]` containing interpolated
values.
"""
shape = x.shape
x = np.asarray(x) + 1
y = np.asarray(y) + 1
data = np.lib.pad(data, 1, "constant", constant_values=0)
x_0 = np.floor(x).astype(int)
x_1 = x_0 + 1
y_0 = np.floor(y).astype(int)
y_1 = y_0 + 1
x_0 = np.clip(x_0, 0, data.shape[1] - 1)
x_1 = np.clip(x_1, 0, data.shape[1] - 1)
y_0 = np.clip(y_0, 0, data.shape[0] - 1)
y_1 = np.clip(y_1, 0, data.shape[0] - 1)
i_a = data[y_0, x_0]
i_b = data[y_1, x_0]
i_c = data[y_0, x_1]
i_d = data[y_1, x_1]
w_a = (x_1 - x) * (y_1 - y)
w_b = (x_1 - x) * (y - y_0)
w_c = (x - x_0) * (y_1 - y)
w_d = (x - x_0) * (y - y_0)
samples = (w_a * i_a + w_b * i_b + w_c * i_c + w_d * i_d)
samples.reshape(shape)
return samples
def _make_warp(batch_size, warp_height, warp_width, dtype):
"""Creates batch of warping coordinates."""
x, y = np.meshgrid(np.linspace(0, warp_width - 1, warp_width),
np.linspace(0, warp_height - 1, warp_height))
warp = np.concatenate((x.reshape([warp_height, warp_width, 1]),
y.reshape([warp_height, warp_width, 1])), 2)
warp = np.tile(warp.reshape([1, warp_height, warp_width, 2]),
[batch_size, 1, 1, 1])
warp += np.random.randn(*warp.shape)
return warp.astype(dtype)
class ResamplerTest(test.TestCase):
  """Tests the resampler op's forward/backward passes and error handling."""
  # Forward-pass tests: one per (device, dtype) pair; the tolerance is
  # loosened for lower-precision dtypes.
  def test_op_forward_pass_gpu_float32(self):
    self._test_op_forward_pass(True, dtypes.float32, 1e-4)
  def test_op_forward_pass_gpu_float64(self):
    self._test_op_forward_pass(True, dtypes.float64, 1e-5)
  def test_op_forward_pass_cpu_float16(self):
    self._test_op_forward_pass(False, dtypes.float16, 1e-2)
  def test_op_forward_pass_cpu_float32(self):
    self._test_op_forward_pass(False, dtypes.float32, 1e-4)
  def test_op_forward_pass_cpu_float64(self):
    self._test_op_forward_pass(False, dtypes.float64, 1e-5)
  # Backward-pass (gradient) tests.
  def test_op_backward_pass_gpu_float32(self):
    self._test_op_backward_pass(True, dtypes.float32, 1e-3)
  def test_op_backward_pass_cpu_float16(self):
    self._test_op_backward_pass(False, dtypes.float16, 1e-3)
  def test_op_backward_pass_cpu_float32(self):
    self._test_op_backward_pass(False, dtypes.float32, 1e-4)
  def test_op_backward_pass_cpu_float64(self):
    self._test_op_backward_pass(False, dtypes.float64, 1e-6)
  def _test_op_forward_pass(self, on_gpu, dtype, tol):
    """Runs the op on random data and compares it to the numpy reference."""
    np.random.seed(0)
    data_width = 7
    data_height = 9
    data_channels = 5
    warp_width = 4
    warp_height = 8
    batch_size = 10
    warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
    data_shape = (batch_size, data_height, data_width, data_channels)
    data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
    with self.test_session(use_gpu=on_gpu, force_gpu=False) as sess:
      data_ph = array_ops.placeholder(dtype, shape=(None,) + data.shape[1:])
      warp_ph = array_ops.placeholder(dtype, shape=(None,) + warp.shape[1:])
      outputs = resampler.resampler(data=data_ph, warp=warp_ph)
      self.assertEqual(outputs.get_shape().as_list(),
                       [None, warp_height, warp_width, data_channels])
      out = sess.run(outputs, feed_dict={data_ph: data, warp_ph: warp})
    # Generate reference output via bilinear interpolation in numpy,
    # one (batch, channel) slice at a time.
    reference_output = np.zeros_like(out)
    for batch in xrange(batch_size):
      for c in xrange(data_channels):
        reference_output[batch, :, :, c] = _bilinearly_interpolate(
            data[batch, :, :, c],
            warp[batch, :, :, 0],
            warp[batch, :, :, 1])
    self.assertAllClose(out, reference_output, rtol=tol, atol=tol)
  def _test_op_backward_pass(self, on_gpu, dtype, tol):
    """Checks the op's analytic gradients against numerical gradients."""
    np.random.seed(13)
    data_width = 5
    data_height = 4
    data_channels = 3
    warp_width = 2
    warp_height = 6
    batch_size = 3
    warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
    data_shape = (batch_size, data_height, data_width, data_channels)
    data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
    with self.test_session(use_gpu=on_gpu, force_gpu=False):
      data_tensor = constant_op.constant(data)
      warp_tensor = constant_op.constant(warp)
      output_tensor = resampler.resampler(data=data_tensor, warp=warp_tensor)
      # Each entry of `grads` is a (theoretical, numerical) gradient pair.
      grads = test.compute_gradient([data_tensor, warp_tensor], [
          data_tensor.get_shape().as_list(),
          warp_tensor.get_shape().as_list()
      ], output_tensor, output_tensor.get_shape().as_list(), [data, warp])
      if not on_gpu:
        # On CPU we perform numerical differentiation at the best available
        # precision, and compare against that. This is necessary for test to
        # pass for float16.
        data_tensor_64 = constant_op.constant(data, dtype=dtypes.float64)
        warp_tensor_64 = constant_op.constant(warp, dtype=dtypes.float64)
        output_tensor_64 = resampler.resampler(data=data_tensor_64,
                                               warp=warp_tensor_64)
        grads_64 = test.compute_gradient([data_tensor_64, warp_tensor_64], [
            data_tensor.get_shape().as_list(),
            warp_tensor.get_shape().as_list()
        ], output_tensor_64, output_tensor.get_shape().as_list(), [data, warp])
        # Compare the op's gradient (g[0]) against the float64 numerical
        # gradient (g_64[1]).
        for g, g_64 in zip(grads, grads_64):
          self.assertLess(np.fabs(g[0] - g_64[1]).max(), tol)
      else:
        for g in grads:
          self.assertLess(np.fabs(g[0] - g[1]).max(), tol)
  def test_op_errors(self):
    """Checks that the op rejects malformed data/warp tensor shapes."""
    data_width = 7
    data_height = 9
    data_depth = 3
    data_channels = 5
    warp_width = 4
    warp_height = 8
    batch_size = 10
    # Input data shape is not defined over a 2D grid, i.e. its shape is not like
    # (batch_size, data_height, data_width, data_channels).
    with self.cached_session() as sess:
      data_shape = (batch_size, data_height, data_width, data_depth,
                    data_channels)
      data = np.zeros(data_shape)
      warp_shape = (batch_size, warp_height, warp_width, 2)
      warp = np.zeros(warp_shape)
      outputs = resampler.resampler(constant_op.constant(data),
                                    constant_op.constant(warp))
      with self.assertRaisesRegexp(errors_impl.UnimplementedError,
                                   "Only bilinear interpolation is currently "
                                   "supported."):
        sess.run(outputs)
    # Warp tensor must be at least a matrix, with shape [batch_size, 2].
    with self.cached_session() as sess:
      data_shape = (batch_size, data_height, data_width, data_channels)
      data = np.zeros(data_shape)
      warp_shape = (batch_size,)
      warp = np.zeros(warp_shape)
      outputs = resampler.resampler(constant_op.constant(data),
                                    constant_op.constant(warp))
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "warp should be at least a matrix"):
        sess.run(outputs)
    # The batch size of the data and warp tensors must be the same.
    with self.cached_session() as sess:
      data_shape = (batch_size, data_height, data_width, data_channels)
      data = np.zeros(data_shape)
      warp_shape = (batch_size+1, warp_height, warp_width, 2)
      warp = np.zeros(warp_shape)
      outputs = resampler.resampler(constant_op.constant(data),
                                    constant_op.constant(warp))
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Batch size of data and warp tensor"):
        sess.run(outputs)
    # The warp tensor must contain 2D coordinates, i.e. its shape last dimension
    # must be 2.
    with self.cached_session() as sess:
      data_shape = (batch_size, data_height, data_width, data_channels)
      data = np.zeros(data_shape)
      warp_shape = (batch_size, warp_height, warp_width, 3)
      warp = np.zeros(warp_shape)
      outputs = resampler.resampler(constant_op.constant(data),
                                    constant_op.constant(warp))
      with self.assertRaisesRegexp(errors_impl.UnimplementedError,
                                   "Only bilinear interpolation is supported, "
                                   "warping"):
        sess.run(outputs)
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed directly.
  test.main()
| apache-2.0 |
vrutkovs/atomic-reactor | atomic_reactor/plugins/pre_distribution_scope.py | 3 | 3257 | """
Copyright (c) 2016 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
from atomic_reactor.constants import INSPECT_CONFIG
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.util import df_parser
class NothingToCheck(Exception):
    """Internal signal: no (valid) distribution-scope label is available."""
class DisallowedDistributionScope(Exception):
    """Raised to fail the build when an image's distribution-scope label is
    less restrictive than its parent image's."""
class DistributionScopePlugin(PreBuildPlugin):
    """Enforce the distribution-scope labelling policy.

    The distribution-scope label (values defined at
    https://github.com/projectatomic/ContainerApplicationGenericLabels)
    takes one of four values, ordered most to least restrictive:
    private, authoritative-source-only, restricted, public.

    A child image must never have a *less* restrictive scope than the
    image it is built from; if it does, the build fails.
    """
    # Name of this plugin
    key = 'distribution_scope'
    # Exceptions from this plugin should fail the build
    is_allowed_to_fail = False
    # LABEL name used for specifying scope
    SCOPE_LABEL = 'distribution-scope'
    # Valid distribution-scope choice values, most restrictive first
    SCOPE_NAME = [
        "private",
        "authoritative-source-only",
        "restricted",
        "public",
    ]
    def get_scope(self, which, labels):
        """Map the distribution-scope label in *labels* to its rank.

        :param which: str, description of the image this belongs to
        :param labels: dict, labels on the image
        :return: int index into SCOPE_NAME (lower is more restrictive)
        :raises NothingToCheck: when the label is absent or invalid
        """
        try:
            chosen = labels[self.SCOPE_LABEL]
        except (KeyError, TypeError):
            self.log.debug("no distribution scope set for %s image", which)
            raise NothingToCheck
        if chosen not in self.SCOPE_NAME:
            self.log.warning("invalid label %s=%s for %s image",
                             self.SCOPE_LABEL, chosen, which)
            raise NothingToCheck
        return self.SCOPE_NAME.index(chosen)
    def run(self):
        """Compare this image's scope with its parent's; fail if looser."""
        try:
            # Scope this build declares in its Dockerfile.
            dockerfile = df_parser(self.workflow.builder.df_path,
                                   workflow=self.workflow)
            scope = self.get_scope('current', dockerfile.labels)
            # Scope inherited from the base image.
            parent_labels = (
                self.workflow.base_image_inspect[INSPECT_CONFIG]['Labels'])
            parent_scope = self.get_scope('parent', parent_labels)
        except NothingToCheck:
            self.log.debug("no checks performed")
            return
        if scope > parent_scope:
            error = ("{label}={scope} but parent has {label}={parent_scope}"
                     .format(label=self.SCOPE_LABEL,
                             scope=self.SCOPE_NAME[scope],
                             parent_scope=self.SCOPE_NAME[parent_scope]))
            self.log.error("%s", error)
            raise DisallowedDistributionScope(error)
        self.log.info("distribution scope checked")
vascotenner/holoviews | holoviews/plotting/bokeh/raster.py | 1 | 6949 | import numpy as np
import param
from bokeh.models.mappers import LinearColorMapper
try:
    from bokeh.models.mappers import LogColorMapper
except ImportError:
    # Older bokeh releases lack LogColorMapper; RasterPlot's logz option
    # then has no log-scale mapper to fall back on.
    LogColorMapper = None
from ...core.util import cartesian_product
from ...element import Image, Raster, RGB
from ..renderer import SkipRendering
from ..util import map_colors
from .element import ElementPlot, line_properties, fill_properties
from .util import mplcmap_to_palette, get_cmap, hsv_to_rgb
class RasterPlot(ElementPlot):
    """Renders Raster/Image elements with bokeh's 'image' glyph, using a
    color mapper constructed from the configured matplotlib colormap."""
    logz = param.Boolean(default=False, doc="""
        Whether to apply log scaling to the z-axis.""")
    show_legend = param.Boolean(default=False, doc="""
        Whether to show legend for the plot.""")
    style_opts = ['cmap']
    _plot_methods = dict(single='image')
    # Handles refreshed on update in addition to the defaults.
    _update_handles = ['color_mapper', 'source', 'glyph']
    def __init__(self, *args, **kwargs):
        super(RasterPlot, self).__init__(*args, **kwargs)
        # Raster coordinates run top-down (see the b = t swap in get_data),
        # so the y-axis direction is toggled for Raster maps.
        if self.hmap.type == Raster:
            self.invert_yaxis = not self.invert_yaxis
    def get_data(self, element, ranges=None, empty=False):
        """Returns image-glyph columns (image array plus extent values)."""
        img = element.data
        if isinstance(element, Image):
            l, b, r, t = element.bounds.lbrt()
        else:
            l, b, r, t = element.extents
        dh = t-b
        if type(element) is Raster:
            b = t
        mapping = dict(image='image', x='x', y='y', dw='dw', dh='dh')
        if empty:
            data = dict(image=[], x=[], y=[], dw=[], dh=[])
        else:
            # Bokeh's image glyph draws rows bottom-up, hence the flip.
            data = dict(image=[np.flipud(img)], x=[l],
                        y=[b], dw=[r-l], dh=[dh])
        return (data, mapping)
    def _glyph_properties(self, plot, element, source, ranges):
        """Extends base glyph properties with a Linear/LogColorMapper built
        from the 'cmap' style and the value dimension's range."""
        properties = super(RasterPlot, self)._glyph_properties(plot, element,
                                                               source, ranges)
        # Shallow-copy so popping keys does not mutate the parent's dict.
        properties = {k: v for k, v in properties.items()}
        val_dim = [d.name for d in element.vdims][0]
        low, high = ranges.get(val_dim)
        if 'cmap' in properties:
            palette = mplcmap_to_palette(properties.pop('cmap', None))
            colormapper = LogColorMapper if self.logz else LinearColorMapper
            cmap = colormapper(palette, low=low, high=high)
            properties['color_mapper'] = cmap
            # Remember the mapper so _update_glyph can sync its bounds later.
            if 'color_mapper' not in self.handles:
                self.handles['color_mapper'] = cmap
        return properties
    def _update_glyph(self, glyph, properties, mapping):
        """Updates the glyph in place, syncing the color mapper's bounds."""
        allowed_properties = glyph.properties()
        cmap = properties.pop('color_mapper', None)
        if cmap:
            glyph.color_mapper.low = cmap.low
            glyph.color_mapper.high = cmap.high
        # Only forward keys bokeh actually accepts on this glyph.
        merged = dict(properties, **mapping)
        glyph.set(**{k: v for k, v in merged.items()
                     if k in allowed_properties})
class RGBPlot(RasterPlot):
    """RasterPlot subclass rendering RGB(A) images via bokeh's image_rgba."""
    style_opts = []
    _plot_methods = dict(single='image_rgba')
    def get_data(self, element, ranges=None, empty=False):
        """Returns image_rgba columns with the image packed as 2D uint32.

        An opaque alpha channel is appended when the input only has three
        channels; float images are rescaled to uint8 before packing.
        """
        data, mapping = super(RGBPlot, self).get_data(element, ranges, empty)
        if empty:
            # Bug fix: the parent returns image=[] when empty, so indexing
            # data['image'][0] first (as the original code did) raised an
            # IndexError before this branch could run.
            data['image'] = []
            return data, mapping
        img = data['image'][0]
        if img.ndim == 3:
            if img.shape[2] == 3:  # alpha channel not included
                alpha = np.ones(img.shape[:2])
                if img.dtype.name == 'uint8':
                    alpha = (alpha*255).astype('uint8')
                img = np.dstack([img, alpha])
            if img.dtype.name != 'uint8':
                img = (img*255).astype(np.uint8)
            N, M, _ = img.shape
            # Reinterpret the [N, M, 4] uint8 buffer as an NxM uint32 image
            # (one packed RGBA pixel per element), as image_rgba expects.
            img = img.view(dtype=np.uint32).reshape((N, M))
            data['image'] = [img]
        return data, mapping
    def _glyph_properties(self, plot, element, source, ranges):
        # Bypass RasterPlot's colormapper handling: RGBA images carry their
        # own colors, so defer straight to ElementPlot.
        return ElementPlot._glyph_properties(self, plot, element,
                                             source, ranges)
class HSVPlot(RGBPlot):
    """Renders HSV elements by converting them to RGB first."""
    def get_data(self, element, ranges=None, empty=False):
        # Convert the HSV array to an RGB element, then reuse the
        # RGBPlot pipeline unchanged.
        as_rgb = RGB(hsv_to_rgb(element.data))
        return super(HSVPlot, self).get_data(as_rgb, ranges, empty)
class HeatmapPlot(ElementPlot):
    """Renders a heatmap as unit-sized 'rect' glyphs on categorical axes."""
    show_legend = param.Boolean(default=False, doc="""
        Whether to show legend for the plot.""")
    _plot_methods = dict(single='rect')
    style_opts = ['cmap', 'color'] + line_properties + fill_properties
    def _axes_props(self, plots, subplots, element, ranges):
        """Builds categorical (string) x/y ranges from the key dimensions."""
        dims = element.dimensions()
        labels = self._get_axis_labels(dims)
        # Unique values of the first two key dimensions become the
        # categorical range entries.
        xvals, yvals = [element.dimension_values(i, False)
                        for i in range(2)]
        plot_ranges = {'x_range': [str(x) for x in xvals],
                       'y_range': [str(y) for y in yvals]}
        return ('auto', 'auto'), labels, plot_ranges
    def get_data(self, element, ranges=None, empty=False):
        """Returns rect-glyph columns: one unit rect per heatmap cell."""
        x, y, z = element.dimensions(label=True)
        if empty:
            data = {x: [], y: [], z: [], 'color': []}
        else:
            style = self.style[self.cyclic_index]
            # An explicit 'palette' style takes precedence over 'cmap'.
            cmap = style.get('palette', style.get('cmap', None))
            cmap = get_cmap(cmap)
            # Rotate the raster so flattened z-values line up with the
            # x/y value columns generated below.
            zvals = np.rot90(element.raster, 3).flatten()
            colors = map_colors(zvals, ranges[z], cmap)
            xvals, yvals = [[str(v) for v in element.dimension_values(i)]
                            for i in range(2)]
            data = {x: xvals, y: yvals, z: zvals, 'color': colors}
        return (data, {'x': x, 'y': y, 'fill_color': 'color', 'height': 1, 'width': 1})
class QuadMeshPlot(ElementPlot):
    """Renders a rectilinear QuadMesh as per-cell 'rect' glyphs."""
    show_legend = param.Boolean(default=False, doc="""
        Whether to show legend for the plot.""")
    _plot_methods = dict(single='rect')
    style_opts = ['cmap', 'color'] + line_properties + fill_properties
    def get_data(self, element, ranges=None, empty=False):
        """Returns rect-glyph columns with per-cell sizes and colors."""
        x, y, z = element.dimensions(label=True)
        if empty:
            # NOTE(review): this empty dict uses 'height'/'width' keys while
            # the mapping below references 'heights'/'widths' columns --
            # looks inconsistent; confirm how the empty case is consumed.
            data = {x: [], y: [], z: [], 'color': [], 'height': [], 'width': []}
        else:
            style = self.style[self.cyclic_index]
            cmap = style.get('palette', style.get('cmap', None))
            cmap = get_cmap(cmap)
            # When x, y and z all share a single shape the mesh is
            # curvilinear (2D coordinate arrays), which this plot cannot draw.
            if len(set(v.shape for v in element.data)) == 1:
                raise SkipRendering("Bokeh QuadMeshPlot only supports rectangular meshes")
            zvals = element.data[2].T.flatten()
            colors = map_colors(zvals, ranges[z], cmap)
            # Unique coordinate values and per-cell extents along each axis.
            xvals = element.dimension_values(0, False)
            yvals = element.dimension_values(1, False)
            widths = np.diff(element.data[0])
            heights = np.diff(element.data[1])
            xs, ys = cartesian_product([xvals, yvals])
            ws, hs = cartesian_product([widths, heights])
            data = {x: xs.flat, y: ys.flat, z: zvals, 'color': colors,
                    'widths': ws.flat, 'heights': hs.flat}
        return (data, {'x': x, 'y': y, 'fill_color': 'color',
                       'height': 'heights', 'width': 'widths'})
| bsd-3-clause |
laszlocsomor/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn.py | 190 | 1972 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that combines a decision forest embedding with a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class ForestToDataThenNN(hybrid_model.HybridModel):
  """A model that combines a decision forest embedding with a neural net."""
  def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):
    """Initializes the hybrid model.

    Args:
      params: Hyperparameter object; must provide `num_trees`.
      device_assigner: Optional device assigner forwarded to each layer.
      optimizer_class: Optimizer used for training (Adagrad by default).
      **kwargs: Forwarded to HybridModel.__init__.
    """
    super(ForestToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)
    # Stage 1: one decisions-to-data layer per tree (the forest embedding).
    # Stage 2: a fully connected layer over the combined tree outputs.
    self.layers = [[decisions_to_data.KFeatureDecisionsToDataLayer(
        params, i, device_assigner)
        for i in range(self.params.num_trees)],
                   fully_connected.FullyConnectedLayer(
                       params,
                       self.params.num_trees,
                       device_assigner=device_assigner)]
| apache-2.0 |
ttsubo/ryu | ryu/services/protocols/vrrp/monitor_linux.py | 23 | 8400 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import socket
import struct
from ryu.controller import handler
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.lib import addrconv
from ryu.lib import hub
from ryu.lib.packet import arp
from ryu.lib.packet import vrrp
from ryu.services.protocols.vrrp import monitor
from ryu.services.protocols.vrrp import event as vrrp_event
from ryu.services.protocols.vrrp import utils
# Those are not defined in socket module
# Linux-specific constants used by the raw-socket multicast calls below;
# the values mirror the kernel ABI (presumably linux/in.h and
# linux/if_packet.h -- confirm against the target kernel headers).
SS_MAXSIZE = 128  # sizeof(struct sockaddr_storage); used to pad group_req
MCAST_JOIN_GROUP = 42  # setsockopt optname: join an IP multicast group
MCAST_LEAVE_GROUP = 45  # setsockopt optname: leave an IP multicast group
PACKET_ADD_MEMBERSHIP = 1  # setsockopt optname: add link-layer membership
PACKET_DROP_MEMBERSHIP = 2  # setsockopt optname: drop link-layer membership
PACKET_MR_MULTICAST = 0  # packet_mreq.mr_type: multicast address
SOL_PACKET = 263  # setsockopt level for AF_PACKET sockets
def if_nametoindex(ifname):
    """Return the kernel interface index for *ifname*.

    Reads /sys/class/net/<ifname>/ifindex, which holds the index as a
    single decimal line.  Returns None if the file is unexpectedly empty;
    raises an environment error if the interface does not exist.
    """
    filename = '/sys/class/net/' + ifname + '/ifindex'
    # File objects are context managers themselves, so the original
    # contextlib.closing() wrapper around open() was redundant.
    with open(filename) as f:
        for line in f:
            return int(line)
@monitor.VRRPInterfaceMonitor.register(vrrp_event.VRRPInterfaceNetworkDevice)
class VRRPInterfaceMonitorNetworkDevice(monitor.VRRPInterfaceMonitor):
    """
    This module uses raw socket so that privilege(CAP_NET_ADMIN capability)
    is required.
    """
    def __init__(self, *args, **kwargs):
        """Opens the raw IP and AF_PACKET sockets for the VRRP interface."""
        super(VRRPInterfaceMonitorNetworkDevice, self).__init__(*args,
                                                                **kwargs)
        self.__is_active = True
        config = self.config
        if config.is_ipv6:
            family = socket.AF_INET6
            ether_type = ether.ETH_TYPE_IPV6
            mac_address = vrrp.vrrp_ipv6_src_mac_address(config.vrid)
        else:
            family = socket.AF_INET
            ether_type = ether.ETH_TYPE_IP
            mac_address = vrrp.vrrp_ipv4_src_mac_address(config.vrid)
        # socket module doesn't define IPPROTO_VRRP
        self.ip_socket = socket.socket(family, socket.SOCK_RAW,
                                       inet.IPPROTO_VRRP)
        # Link-layer socket used both to receive and transmit VRRP frames
        # on the monitored device.
        self.packet_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW,
                                           socket.htons(ether_type))
        self.packet_socket.bind((self.interface.device_name, ether_type,
                                 socket.PACKET_MULTICAST,
                                 arp.ARP_HW_TYPE_ETHERNET,
                                 addrconv.mac.text_to_bin(mac_address)))
        self.ifindex = if_nametoindex(self.interface.device_name)
    def start(self):
        """Joins the multicast groups and spawns the receive loop."""
        # discard received packets before joining multicast membership
        packet_socket = self.packet_socket
        packet_socket.setblocking(0)
        with hub.Timeout(0.1, False):
            while True:
                try:
                    packet_socket.recv(1500)
                except socket.error:
                    break
        packet_socket.setblocking(1)
        self._join_multicast_membership(True)
        self._join_vrrp_group(True)
        super(VRRPInterfaceMonitorNetworkDevice, self).start()
        self.threads.append(hub.spawn(self._recv_loop))
    def stop(self):
        """Deactivates the receive loop (it leaves the groups on exit)."""
        self.__is_active = False
        super(VRRPInterfaceMonitorNetworkDevice, self).stop()
    # we assume that the structures in the following two functions for
    # multicast are aligned in the same way on all the archtectures.
    def _join_multicast_membership(self, join_leave):
        """Adds (True) or drops (False) link-layer multicast membership."""
        config = self.config
        if config.is_ipv6:
            mac_address = vrrp.vrrp_ipv6_src_mac_address(config.vrid)
        else:
            mac_address = vrrp.vrrp_ipv4_src_mac_address(config.vrid)
        if join_leave:
            add_drop = PACKET_ADD_MEMBERSHIP
        else:
            add_drop = PACKET_DROP_MEMBERSHIP
        # struct packet_mreq {
        #     int mr_ifindex;
        #     unsigned short mr_type;
        #     unsigned short mr_alen;
        #     unsigned char mr_mr_address[8];
        # };
        packet_mreq = struct.pack('IHH8s', self.ifindex,
                                  PACKET_MR_MULTICAST, 6,
                                  addrconv.mac.text_to_bin(mac_address))
        self.packet_socket.setsockopt(SOL_PACKET, add_drop, packet_mreq)
    def _join_vrrp_group(self, join_leave):
        """Joins (True) or leaves (False) the VRRP IP multicast group."""
        if join_leave:
            join_leave = MCAST_JOIN_GROUP
        else:
            join_leave = MCAST_LEAVE_GROUP
        # struct group_req {
        #     __u32 gr_interface; /* interface index */
        #     struct __kernel_sockaddr_storage gr_group; /* group address */
        # };
        group_req = struct.pack('I', self.ifindex)
        # padding to gr_group. This is environment dependent
        group_req += b'\x00' * (struct.calcsize('P') - struct.calcsize('I'))
        if self.config.is_ipv6:
            # struct sockaddr_in6 {
            #     sa_family_t sin6_family;   /* AF_INET6 */
            #     in_port_t sin6_port;       /* port number */
            #     uint32_t sin6_flowinfo;    /* IPv6 flow information */
            #     struct in6_addr sin6_addr; /* IPv6 address */
            #     uint32_t sin6_scope_id;    /* Scope ID (new in 2.4) */
            # };
            # struct in6_addr {
            #     unsigned char s6_addr[16]; /* IPv6 address */
            # };
            family = socket.IPPROTO_IPV6
            sockaddr = struct.pack('H', socket.AF_INET6)
            sockaddr += struct.pack('!H', 0)
            sockaddr += struct.pack('!I', 0)
            sockaddr += addrconv.ipv6.text_to_bin(vrrp.VRRP_IPV6_DST_ADDRESS)
            sockaddr += struct.pack('I', 0)
        else:
            # #define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
            # struct sockaddr_in {
            #     __kernel_sa_family_t sin_family; /* Address family */
            #     __be16 sin_port;                 /* Port number */
            #     struct in_addr sin_addr;         /* Internet address */
            #     /* Pad to size of `struct sockaddr'. */
            #     unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) -
            #         sizeof(unsigned short int) - sizeof(struct in_addr)];
            # };
            # struct in_addr {
            #     __be32 s_addr;
            # };
            family = socket.IPPROTO_IP
            sockaddr = struct.pack('H', socket.AF_INET)
            sockaddr += struct.pack('!H', 0)
            sockaddr += addrconv.ipv4.text_to_bin(vrrp.VRRP_IPV4_DST_ADDRESS)
        # Pad the sockaddr out to sockaddr_storage size.
        sockaddr += b'\x00' * (SS_MAXSIZE - len(sockaddr))
        group_req += sockaddr
        self.ip_socket.setsockopt(family, join_leave, group_req)
        return
    def _recv_loop(self):
        """Receives VRRP frames and forwards them until deactivated."""
        packet_socket = self.packet_socket
        packet_socket.settimeout(1.3)  # to check activeness periodically
        try:
            while self.__is_active:
                try:
                    buf = packet_socket.recv(128)
                except socket.timeout:
                    self.logger.debug('timeout')
                    continue
                except:
                    self.logger.error('recv failed')
                    continue
                if len(buf) == 0:
                    # Zero-length read: treat the socket as closed.
                    self.__is_active = False
                    break
                self.logger.debug('recv buf')
                self._send_vrrp_packet_received(buf)
        finally:
            # Always leave the multicast groups on the way out.
            self._join_vrrp_group(False)
            self._join_multicast_membership(False)
    @handler.set_ev_handler(vrrp_event.EventVRRPTransmitRequest)
    def vrrp_transmit_request_handler(self, ev):
        """Transmits the VRRP packet carried by an EventVRRPTransmitRequest."""
        self.logger.debug('send')
        try:
            self.packet_socket.sendto(ev.data,
                                      (self.interface.device_name, 0))
        except:
            self.logger.error('send failed')
    def _initialize(self):
        # nothing
        pass
    def _shutdown(self):
        """Marks the receive loop inactive so it exits and cleans up."""
        self.__is_active = False
umitproject/tease-o-matic | django/contrib/gis/db/backends/postgis/introspection.py | 514 | 4385 | from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.contrib.gis.gdal import OGRGeomType
class GeoIntrospectionError(Exception):
    """Raised internally when a column is absent from ``geometry_columns``
    so the ``geography_columns`` fallback lookup can be attempted."""
class PostGISIntrospection(DatabaseIntrospection):
    """Database introspection that also recognizes the PostGIS geometry
    and (when supported) geography custom column types."""
    # Reverse dictionary for PostGIS geometry types not populated until
    # introspection is actually performed.
    postgis_types_reverse = {}
    def get_postgis_types(self):
        """
        Returns a dictionary with keys that are the PostgreSQL object
        identification integers for the PostGIS geometry and/or
        geography types (if supported).
        """
        cursor = self.connection.cursor()
        # The OID integers associated with the geometry type may
        # be different across versions; hence, this is why we have
        # to query the PostgreSQL pg_type table corresponding to the
        # PostGIS custom data types.
        oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
        try:
            cursor.execute(oid_sql, ('geometry',))
            GEOM_TYPE = cursor.fetchone()[0]
            postgis_types = { GEOM_TYPE : 'GeometryField' }
            if self.connection.ops.geography:
                cursor.execute(oid_sql, ('geography',))
                GEOG_TYPE = cursor.fetchone()[0]
                # The value for the geography type is actually a tuple
                # to pass in the `geography=True` keyword to the field
                # definition.
                postgis_types[GEOG_TYPE] = ('GeometryField', {'geography' : True})
        finally:
            cursor.close()
        return postgis_types
    def get_field_type(self, data_type, description):
        """Resolves a column's field type, including PostGIS custom types."""
        if not self.postgis_types_reverse:
            # If the PostGIS types reverse dictionary is not populated, do so
            # now. In order to prevent unnecessary requests upon connection
            # intialization, the `data_types_reverse` dictionary is not updated
            # with the PostGIS custom types until introspection is actually
            # performed -- in other words, when this function is called.
            self.postgis_types_reverse = self.get_postgis_types()
            self.data_types_reverse.update(self.postgis_types_reverse)
        return super(PostGISIntrospection, self).get_field_type(data_type, description)
    def get_geometry_type(self, table_name, geo_col):
        """
        The geometry type OID used by PostGIS does not indicate the particular
        type of field that a geometry column is (e.g., whether it's a
        PointField or a PolygonField). Thus, this routine queries the PostGIS
        metadata tables to determine the geometry type,
        """
        cursor = self.connection.cursor()
        try:
            try:
                # First seeing if this geometry column is in the `geometry_columns`
                cursor.execute('SELECT "coord_dimension", "srid", "type" '
                               'FROM "geometry_columns" '
                               'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                               (table_name, geo_col))
                row = cursor.fetchone()
                if not row: raise GeoIntrospectionError
            except GeoIntrospectionError:
                # Fall back to `geography_columns` when geography is available.
                if self.connection.ops.geography:
                    cursor.execute('SELECT "coord_dimension", "srid", "type" '
                                   'FROM "geography_columns" '
                                   'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
                                   (table_name, geo_col))
                    row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
                                (table_name, geo_col))
            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django
            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            # Only record parameters that differ from the field defaults
            # (srid=4326, dim=2).
            if srid != 4326:
                field_params['srid'] = srid
            if dim != 2:
                field_params['dim'] = dim
        finally:
            cursor.close()
        return field_type, field_params
| bsd-3-clause |
jvpoulos/cs289-hw5 | hw5-code/census_clf.py | 1 | 2480 | import numpy as np
import copy
import cPickle as pickle
import decision_tree as dt
from sklearn.cross_validation import train_test_split
# Load train data
# train, load from csv without headers
features_train = np.genfromtxt('../census-dataset/census-train-features-median.csv', delimiter=' ', skip_header=1)
# remove index column
features_train = features_train[:, 1:]

labels_train = np.genfromtxt('../census-dataset/census-train-labels.csv', delimiter=' ', skip_header=1)
# remove index column
labels_train = labels_train[:, 1:][:, 0]

# split to obtain train and held-out validation set
x_train, x_test, y_train, y_test = train_test_split(features_train, labels_train, test_size=0.33)

# concatenate features and labels
data_train = np.column_stack((x_train, y_train))
data_test = np.column_stack((x_test, y_test))

# build decision tree using entropy with a 0.01 minimum-gain split threshold
decision_tree = dt.buildtree(data_train, dt.entropy, 0.01)


def _predict(tree, features):
    """Classify every row of `features` with `tree`; returns a label array.

    (List comprehension instead of map(): under Python 3, np.array(map(...))
    would wrap the iterator in a 0-d object array instead of a label vector.)
    """
    return np.array([dt.convertToLabel(dt.classify(obs, tree)) for obs in features])


# grid-search the minimal-gain pruning threshold on the held-out split
min_gain_error = {}
for min_gain_value in np.arange(0, 1, 0.01):
    # Bug fix: deepcopy instead of copy.copy -- a shallow copy shares subtree
    # nodes with `decision_tree`, so pruning a candidate would mutate the
    # original tree and corrupt every later iteration.
    dt_temp = copy.deepcopy(decision_tree)
    dt.prune(dt_temp, min_gain_value)
    # classify held-out data with the pruned candidate and record its error
    y_hat = _predict(dt_temp, x_test)
    min_gain_error[min_gain_value] = np.mean(y_hat != y_test)

# prune tree with the threshold that minimizes held-out error
# (bug fix: the original took min() over (threshold, error) tuples, which
# compares thresholds first and therefore always selected threshold 0.0)
min_gain_opt = min(min_gain_error.items(), key=lambda kv: kv[1])[0]
dt.prune(decision_tree, min_gain_opt)

# print and draw decision tree
# dt.drawtree(decision_tree,png='census_decision_tree.png')
# dt.printtree(decision_tree)

# classify validation set with pruned tree and report the held-out error
y_hat_val = _predict(decision_tree, x_test)
error_val = np.mean(y_hat_val != y_test)
print(error_val)

# load test features
features_test = np.genfromtxt('../census-dataset/census-test-features-median.csv', delimiter=' ', skip_header=1)
features_test = features_test[:, 1:]

# classify test set with pruned tree
y_hat_test = _predict(decision_tree, features_test)

# export labels for kaggle submission
test_ids = np.arange(1, y_hat_test.shape[0] + 1, 1)
np.savetxt("census_decision_tree.csv", np.vstack((test_ids, y_hat_test)).T,
           delimiter=",", fmt='%1.0f', header='Id,Category')
| mit |
thehyve/variant | eggs/django-1.3.1-py2.7.egg/django/contrib/sites/tests.py | 153 | 2147 | from django.conf import settings
from django.contrib.sites.models import Site, RequestSite, get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class SitesFrameworkTests(TestCase):
    """Tests for Site objects, the site cache and get_current_site()."""
    def setUp(self):
        # Ensure a Site row exists for settings.SITE_ID and pretend the
        # sites app is installed for the duration of each test.
        Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
        self.old_Site_meta_installed = Site._meta.installed
        Site._meta.installed = True
    def tearDown(self):
        # Restore the original installed flag saved in setUp.
        Site._meta.installed = self.old_Site_meta_installed
    def test_site_manager(self):
        # Make sure that get_current() does not return a deleted Site object.
        s = Site.objects.get_current()
        self.assertTrue(isinstance(s, Site))
        s.delete()
        self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
    def test_site_cache(self):
        # After updating a Site object (e.g. via the admin), we shouldn't return a
        # bogus value from the SITE_CACHE.
        site = Site.objects.get_current()
        self.assertEqual(u"example.com", site.name)
        s2 = Site.objects.get(id=settings.SITE_ID)
        s2.name = "Example site"
        s2.save()
        site = Site.objects.get_current()
        self.assertEqual(u"Example site", site.name)
    def test_get_current_site(self):
        # Test that the correct Site object is returned
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        site = get_current_site(request)
        self.assertTrue(isinstance(site, Site))
        self.assertEqual(site.id, settings.SITE_ID)
        # Test that an exception is raised if the sites framework is installed
        # but there is no matching Site
        site.delete()
        self.assertRaises(ObjectDoesNotExist, get_current_site, request)
        # A RequestSite is returned if the sites framework is not installed
        Site._meta.installed = False
        site = get_current_site(request)
        self.assertTrue(isinstance(site, RequestSite))
        self.assertEqual(site.name, u"example.com")
| apache-2.0 |
malishevg/edugraph | cms/djangoapps/contentstore/tests/test_import.py | 5 | 8862 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
"""
Tests for import_from_xml using the mongo modulestore.
"""
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from path import path
import copy
from django.contrib.auth.models import User
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from contentstore.tests.modulestore_config import TEST_MODULESTORE
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.xml_importer import import_from_xml
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import _CONTENTSTORE
from xmodule.course_module import CourseDescriptor
from xmodule.exceptions import NotFoundError
from uuid import uuid4
from pymongo import MongoClient
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, MODULESTORE=TEST_MODULESTORE)
class ContentStoreImportTest(ModuleStoreTestCase):
    """
    Tests that rely on the toy and test_import_course courses.
    NOTE: refactor using CourseFactory so they do not.
    """
    def setUp(self):
        # Point both modulestores at the checked-in test course data.
        settings.MODULESTORE['default']['OPTIONS']['fs_root'] = path('common/test/data')
        settings.MODULESTORE['direct']['OPTIONS']['fs_root'] = path('common/test/data')

        uname = 'testuser'
        email = 'test+courses@edx.org'
        password = 'foo'

        # Create the user so we can log them in.
        self.user = User.objects.create_user(uname, email, password)

        # Note that we do not actually need to do anything
        # for registration if we directly mark them active.
        self.user.is_active = True
        # Staff has access to view all courses
        self.user.is_staff = True

        # Save the data that we've just changed to the db.
        self.user.save()

        self.client = Client()
        self.client.login(username=uname, password=password)

    def tearDown(self):
        # Drop the per-run contentstore database and reset the cached
        # contentstore singleton so tests stay isolated from each other.
        MongoClient().drop_database(TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'])
        _CONTENTSTORE.clear()

    def load_test_import_course(self):
        '''
        Load the standard course used to test imports (for do_import_static=False behavior).

        Returns (module_store, content_store, course, course_location).
        '''
        content_store = contentstore()
        module_store = modulestore('direct')
        import_from_xml(module_store, 'common/test/data/', ['test_import_course'], static_content_store=content_store, do_import_static=False, verbose=True)
        course_location = CourseDescriptor.id_to_location('edX/test_import_course/2012_Fall')
        course = module_store.get_item(course_location)
        self.assertIsNotNone(course)
        return module_store, content_store, course, course_location

    def test_unicode_chars_in_course_name_import(self):
        """
        # Test that importing course with unicode 'id' and 'display name' doesn't give UnicodeEncodeError
        """
        module_store = modulestore('direct')
        # Both the org and the course run name contain non-ASCII characters.
        target_location = Location(['i4x', u'Юникода', 'unicode_course', 'course', u'échantillon'])
        import_from_xml(
            module_store,
            'common/test/data/',
            ['2014_Uni'],
            target_location_namespace=target_location
        )
        course = module_store.get_item(target_location)
        self.assertIsNotNone(course)
        # test that course 'display_name' same as imported course 'display_name'
        self.assertEqual(course.display_name, u"Φυσικά το όνομα Unicode")

    def test_static_import(self):
        '''
        Stuff in static_import should always be imported into contentstore
        '''
        _, content_store, course, course_location = self.load_test_import_course()

        # make sure we have ONE asset in our contentstore ("should_be_imported.html")
        all_assets, count = content_store.get_all_content_for_course(course_location)
        print "len(all_assets)=%d" % len(all_assets)
        self.assertEqual(len(all_assets), 1)
        self.assertEqual(count, 1)

        content = None
        try:
            location = StaticContent.get_location_from_path('/c4x/edX/test_import_course/asset/should_be_imported.html')
            content = content_store.find(location)
        except NotFoundError:
            # content stays None; asserted below so the failure message is clear
            pass
        self.assertIsNotNone(content)

        # make sure course.static_asset_path is correct
        print "static_asset_path = {0}".format(course.static_asset_path)
        self.assertEqual(course.static_asset_path, 'test_import_course')

    def test_asset_import_nostatic(self):
        '''
        This test validates that an image asset is NOT imported when do_import_static=False
        '''
        content_store = contentstore()
        module_store = modulestore('direct')
        import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store, do_import_static=False, verbose=True)

        course_location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
        module_store.get_item(course_location)

        # make sure we have NO assets in our contentstore
        all_assets, count = content_store.get_all_content_for_course(course_location)
        self.assertEqual(len(all_assets), 0)
        self.assertEqual(count, 0)

    def test_no_static_link_rewrites_on_import(self):
        # With do_import_static=False the '/static/' references inside the
        # course content must be left untouched by the importer.
        module_store = modulestore('direct')
        import_from_xml(module_store, 'common/test/data/', ['toy'], do_import_static=False, verbose=True)

        handouts = module_store.get_item(Location(['i4x', 'edX', 'toy', 'course_info', 'handouts', None]))
        self.assertIn('/static/', handouts.data)

        handouts = module_store.get_item(Location(['i4x', 'edX', 'toy', 'html', 'toyhtml', None]))
        self.assertIn('/static/', handouts.data)

    def test_tab_name_imports_correctly(self):
        _module_store, _content_store, course, _course_location = self.load_test_import_course()
        print "course tabs = {0}".format(course.tabs)
        self.assertEqual(course.tabs[2]['name'], 'Syllabus')

    def test_rewrite_reference_list(self):
        # Importing into a different namespace must rewrite in-course
        # references (here: a conditional module's reference lists) while
        # leaving cross-course references alone.
        module_store = modulestore('direct')
        target_location = Location(['i4x', 'testX', 'conditional_copy', 'course', 'copy_run'])
        import_from_xml(
            module_store,
            'common/test/data/',
            ['conditional'],
            target_location_namespace=target_location
        )
        conditional_module = module_store.get_item(
            Location(['i4x', 'testX', 'conditional_copy', 'conditional', 'condone'])
        )
        self.assertIsNotNone(conditional_module)
        self.assertListEqual(
            [
                u'i4x://testX/conditional_copy/problem/choiceprob',
                u'i4x://edX/different_course/html/for_testing_import_rewrites'
            ],
            conditional_module.sources_list
        )
        self.assertListEqual(
            [
                u'i4x://testX/conditional_copy/html/congrats',
                u'i4x://testX/conditional_copy/html/secret_page'
            ],
            conditional_module.show_tag_list
        )

    def test_rewrite_reference(self):
        # Single (scalar) reference fields must also be rewritten into the
        # target namespace.
        module_store = modulestore('direct')
        target_location = Location(['i4x', 'testX', 'peergrading_copy', 'course', 'copy_run'])
        import_from_xml(
            module_store,
            'common/test/data/',
            ['open_ended'],
            target_location_namespace=target_location
        )
        peergrading_module = module_store.get_item(
            Location(['i4x', 'testX', 'peergrading_copy', 'peergrading', 'PeerGradingLinked'])
        )
        self.assertIsNotNone(peergrading_module)
        self.assertEqual(
            u'i4x://testX/peergrading_copy/combinedopenended/SampleQuestion',
            peergrading_module.link_to_location
        )

    def test_rewrite_reference_value_dict(self):
        # Reference-valued dicts (split_test group_id_to_child) must be
        # rewritten value-by-value into the target namespace.
        module_store = modulestore('direct')
        target_location = Location(['i4x', 'testX', 'split_test_copy', 'course', 'copy_run'])
        import_from_xml(
            module_store,
            'common/test/data/',
            ['split_test_module'],
            target_location_namespace=target_location
        )
        split_test_module = module_store.get_item(
            Location(['i4x', 'testX', 'split_test_copy', 'split_test', 'split1'])
        )
        self.assertIsNotNone(split_test_module)
        self.assertEqual(
            {
                "0": "i4x://testX/split_test_copy/vertical/sample_0",
                "2": "i4x://testX/split_test_copy/vertical/sample_2",
            },
            split_test_module.group_id_to_child,
        )
| agpl-3.0 |
midokura/python-neutron-plugin-midonet | midonet/neutron/extensions/subnet.py | 1 | 6971 | # Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author Jaume Devesa
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron import manager
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)

# Resource name constants; the plural (collection) forms are derived from
# the singular (member) forms so the two cannot drift apart.
DHCP_HOST = 'dhcp_host'
DHCP_HOSTS = '%ss' % DHCP_HOST
SUBNET = 'midonet_subnet'
SUBNETS = '%ss' % SUBNET
# Monkey patches to add validations.
def _validate_non_negative_or_none(data, valid_values=None):
if data is not None:
attr._validate_non_negative(data, valid_values)
def _validate_range_or_none(data, valid_values=None):
if data is not None:
attr._validate_range(data, valid_values)
# Register the new validator types so RESOURCE_ATTRIBUTE_MAP below can
# reference them by name, mirroring neutron's built-in 'type:*' validators.
attr.validators['type:non_negative_or_none'] = _validate_non_negative_or_none
attr.validators['type:range_or_none'] = _validate_range_or_none
# Attribute maps consumed by base.create_resource(); keys follow neutron's
# standard schema (allow_post/allow_put, validate, is_visible, default, ...).
RESOURCE_ATTRIBUTE_MAP = {
    # DHCP subnet resource attributes.
    SUBNETS: {
        'default_gateway': {'allow_post': True, 'allow_put': True,
                            'validate': {'type:ip_address_or_none': None},
                            'is_visible': True, 'default': None},
        'enabled': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:boolean': None},
                    'is_visible': True, 'default': True},
        'server_addr': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:ip_address_or_none': None},
                        'is_visible': True, 'default': None},
        'dns_server_addrs': {'allow_post': True, 'allow_put': True,
                             'validate': {'type:ip_address_or_none': None},
                             'is_visible': True, 'default': None},
        'subnet_prefix': {'allow_post': True, 'allow_put': True,
                          'validate': {'type:ip_address_or_none': None},
                          'is_visible': True, 'default': None},
        # Prefix length; 0-32 implies IPv4-only subnets here.
        'subnet_length': {'allow_post': True, 'allow_put': True,
                          'validate': {'type:range_or_none': [0, 32]},
                          'is_visible': True, 'default': None},
        'interface_mtu': {'allow_post': True, 'allow_put': True,
                          'validate': {'type:non_negative_or_none': None},
                          'is_visible': True, 'default': None},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
    },
    # DHCP host (static host entry) attributes, nested under a subnet.
    DHCP_HOSTS: {
        'ip_address': {'allow_post': True, 'allow_put': True,
                       'validate': {'type:ip_address': None},
                       'is_visible': True, 'required_by_policy': True},
        'mac_address': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:mac_address': None},
                        'is_visible': True, 'required_by_policy': True},
        # NOTE(review): tenant_id is validated as a plain string here but
        # as a uuid on SUBNETS above -- confirm the asymmetry is intended.
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True}
    }
}
class Subnet(extensions.ExtensionDescriptor):
    """API extension descriptor exposing MidoNet DHCP subnets and their
    DHCP hosts as neutron REST resources."""

    @classmethod
    def get_name(cls):
        return "Midonet Subnet (DHCP Subnet)"

    @classmethod
    def get_alias(cls):
        return "midonet-subnet"

    @classmethod
    def get_description(cls):
        return "Neutron subnet with midonet extensions"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/midonet_subnet/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        plugin = manager.NeutronManager.get_plugin()

        # subnets: top-level collection
        collection_name = SUBNETS
        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
        subnet_controller = base.create_resource(
            collection_name, SUBNET, plugin, params, allow_bulk=True)
        ex = extensions.ResourceExtension(collection_name, subnet_controller)
        exts.append(ex)

        # hosts: registered as a sub-resource nested under a subnet
        parent = dict(member_name=SUBNET,
                      collection_name=SUBNETS)
        collection_name = DHCP_HOSTS
        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
        host_controller = base.create_resource(
            collection_name, DHCP_HOST, plugin, params,
            parent=parent, allow_bulk=True)
        ex = extensions.ResourceExtension(
            collection_name, host_controller, parent=parent)
        exts.append(ex)

        return exts

    def get_extended_resources(self, version):
        # Only the v2.0 API gets the extended attribute map.
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
@six.add_metaclass(abc.ABCMeta)
class SubnetPluginBase(object):
    """Interface a plugin must implement to support midonet_subnet CRUD."""

    @abc.abstractmethod
    def create_midonet_subnet(self, context, midonet_subnet):
        """Create a DHCP subnet from the given request body dict."""
        pass

    @abc.abstractmethod
    def update_midonet_subnet(self, context, id, midonet_subnet):
        """Update the subnet identified by *id* with the given fields."""
        pass

    @abc.abstractmethod
    def get_midonet_subnet(self, context, midonet_subnet, fields=None):
        """Return one subnet, optionally restricted to *fields*.

        NOTE(review): the second parameter is named *midonet_subnet* while
        the sibling methods take *id* -- presumably this is the subnet id;
        confirm against implementations before renaming.
        """
        pass

    @abc.abstractmethod
    def delete_midonet_subnet(self, context, id):
        """Delete the subnet identified by *id*."""
        pass

    @abc.abstractmethod
    def get_midonet_subnets(self, context, filters=None, fields=None):
        """List subnets matching *filters*, restricted to *fields*."""
        pass
@six.add_metaclass(abc.ABCMeta)
class SubnetDhcpHostPluginBase(object):
    """Interface for CRUD of DHCP hosts nested under a midonet subnet.

    All methods receive *midonet_subnet_id*, the id of the parent subnet,
    because dhcp_hosts is registered as a sub-resource of SUBNETS.
    """

    @abc.abstractmethod
    def get_midonet_subnet_dhcp_host(self, context, id, midonet_subnet_id,
                                     fields=None):
        """Return one DHCP host entry of the given subnet."""
        pass

    @abc.abstractmethod
    def update_midonet_subnet_dhcp_host(self, context, id, midonet_subnet_id,
                                        dhcp_host):
        """Update a DHCP host entry with the given fields."""
        pass

    @abc.abstractmethod
    def delete_midonet_subnet_dhcp_host(self, context, id, midonet_subnet_id):
        """Delete a DHCP host entry from the given subnet."""
        pass

    @abc.abstractmethod
    def get_midonet_subnet_dhcp_hosts(self, context, midonet_subnet_id,
                                      filters=None, fields=None):
        """List DHCP host entries of the given subnet."""
        pass

    @abc.abstractmethod
    def create_midonet_subnet_dhcp_host(self, context, midonet_subnet_id,
                                        dhcp_host):
        """Create a DHCP host entry under the given subnet."""
        pass
| apache-2.0 |
pfcurtis/twitter-spark-sentiment | lib/oauthlib/oauth1/rfc5849/endpoints/request_token.py | 42 | 8945 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.request_token
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the request token provider logic of
OAuth 1.0 RFC 5849. It validates the correctness of request token requests,
creates and persists tokens as well as create the proper response to be
returned to the client.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import urlencode
from .base import BaseEndpoint
from .. import errors
log = logging.getLogger(__name__)
class RequestTokenEndpoint(BaseEndpoint):

    """An endpoint responsible for providing OAuth 1 request tokens.

    Typical use is to instantiate with a request validator and invoke the
    ``create_request_token_response`` from a view function. The tuple returned
    has all information necessary (body, status, headers) to quickly form
    and return a proper response. See :doc:`/oauth1/validator` for details on which
    validator methods to implement for this endpoint.
    """

    def create_request_token(self, request, credentials):
        """Create and save a new request token.

        :param request: An oauthlib.common.Request object.
        :param credentials: A dict of extra token credentials.
        :returns: The token as an urlencoded string.
        """
        token = {
            'oauth_token': self.token_generator(),
            'oauth_token_secret': self.token_generator(),
            # Tells the client its callback URI was accepted (RFC 5849 2.1).
            'oauth_callback_confirmed': 'true'
        }
        token.update(credentials)
        self.request_validator.save_request_token(token, request)
        return urlencode(token.items())

    def create_request_token_response(self, uri, http_method='GET', body=None,
                                      headers=None, credentials=None):
        """Create a request token response, with a new request token if valid.

        :param uri: The full URI of the token request.
        :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
        :param body: The request body as a string.
        :param headers: The request headers as a dict.
        :param credentials: A list of extra credentials to include in the token.
        :returns: A tuple of 3 elements.
                  1. A dict of headers to set on the response.
                  2. The response body as a string.
                  3. The response status code as an integer.

        An example of a valid request::

            >>> from your_validator import your_validator
            >>> from oauthlib.oauth1 import RequestTokenEndpoint
            >>> endpoint = RequestTokenEndpoint(your_validator)
            >>> h, b, s = endpoint.create_request_token_response(
            ...     'https://your.provider/request_token?foo=bar',
            ...     headers={
            ...         'Authorization': 'OAuth realm=movies user, oauth_....'
            ...     },
            ...     credentials={
            ...         'my_specific': 'argument',
            ...     })
            >>> h
            {'Content-Type': 'application/x-www-form-urlencoded'}
            >>> b
            'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_callback_confirmed=true&my_specific=argument'
            >>> s
            200

        An response to invalid request would have a different body and status::

            >>> b
            'error=invalid_request&description=missing+callback+uri'
            >>> s
            400

        The same goes for an an unauthorized request:

            >>> b
            ''
            >>> s
            401
        """
        resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        try:
            request = self._create_request(uri, http_method, body, headers)
            valid, processed_request = self.validate_request_token_request(
                request)
            if valid:
                token = self.create_request_token(request, credentials or {})
                return resp_headers, token, 200
            else:
                # Invalid client credentials: deliberately empty body.
                return {}, None, 401
        except errors.OAuth1Error as e:
            # Well-formed OAuth errors carry their own body and status.
            return resp_headers, e.urlencoded, e.status_code

    def validate_request_token_request(self, request):
        """Validate a request token request.

        :param request: An oauthlib.common.Request object.
        :raises: OAuth1Error if the request is invalid.
        :returns: A tuple of 2 elements.
                  1. The validation result (True or False).
                  2. The request object.
        """
        self._check_transport_security(request)
        self._check_mandatory_parameters(request)

        if request.realm:
            request.realms = request.realm.split(' ')
        else:
            request.realms = self.request_validator.get_default_realms(
                request.client_key, request)
        if not self.request_validator.check_realms(request.realms):
            raise errors.InvalidRequestError(
                description='Invalid realm %s. Allowed are %r.' % (
                    request.realms, self.request_validator.realms))

        if not request.redirect_uri:
            raise errors.InvalidRequestError(
                description='Missing callback URI.')

        if not self.request_validator.validate_timestamp_and_nonce(
                request.client_key, request.timestamp, request.nonce, request,
                request_token=request.resource_owner_key):
            return False, request

        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid client credentials.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy client is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable client enumeration
        valid_client = self.request_validator.validate_client_key(
            request.client_key, request)
        if not valid_client:
            request.client_key = self.request_validator.dummy_client

        # Note that `realm`_ is only used in authorization headers and how
        # it should be interepreted is not included in the OAuth spec.
        # However they could be seen as a scope or realm to which the
        # client has access and as such every client should be checked
        # to ensure it is authorized access to that scope or realm.
        # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
        #
        # Note that early exit would enable client realm access enumeration.
        #
        # The require_realm indicates this is the first step in the OAuth
        # workflow where a client requests access to a specific realm.
        # This first step (obtaining request token) need not require a realm
        # and can then be identified by checking the require_resource_owner
        # flag and abscence of realm.
        #
        # Clients obtaining an access token will not supply a realm and it will
        # not be checked. Instead the previously requested realm should be
        # transferred from the request token to the access token.
        #
        # Access to protected resources will always validate the realm but note
        # that the realm is now tied to the access token and not provided by
        # the client.
        valid_realm = self.request_validator.validate_requested_realms(
            request.client_key, request.realms, request)

        # Callback is normally never required, except for requests for
        # a Temporary Credential as described in `Section 2.1`_
        # .._`Section 2.1`: http://tools.ietf.org/html/rfc5849#section-2.1
        valid_redirect = self.request_validator.validate_redirect_uri(
            request.client_key, request.redirect_uri, request)
        if not request.redirect_uri:
            raise NotImplementedError('Redirect URI must either be provided '
                                      'or set to a default during validation.')

        valid_signature = self._check_signature(request)

        # We delay checking validity until the very end, using dummy values for
        # calculations and fetching secrets/keys to ensure the flow of every
        # request remains almost identical regardless of whether valid values
        # have been supplied. This ensures near constant time execution and
        # prevents malicious users from guessing sensitive information
        v = all((valid_client, valid_realm, valid_redirect, valid_signature))
        if not v:
            log.info("[Failure] request verification failed.")
            log.info("Valid client: %s.", valid_client)
            log.info("Valid realm: %s.", valid_realm)
            log.info("Valid callback: %s.", valid_redirect)
            log.info("Valid signature: %s.", valid_signature)
        return v, request
| gpl-2.0 |
flappycoin-project/flappycoin-old | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin stand-in for the old PyCrypto-style hash interface:
    # SHA256.new(data) returns a fresh hashlib sha256 object.
    new = hashlib.sha256
if str != bytes:
    # Python 3.x: iterating a bytes object yields ints, so shadow the
    # ord/chr builtins with versions that bridge the 2/3 difference for
    # the byte-level code below.
    def ord(c):
        return c

    def chr(n):
        return bytes((n,))
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars


def b58encode(v):
    """ encode v, which is a string of bytes, to base58.

    Fix: on Python 3 iterating bytes yields ints, so the original
    leading-zero test compared an int against the one-character NUL
    string and never matched -- leading 0x00 bytes were silently
    dropped from the encoding. Both byte representations are now
    handled explicitly, without relying on the module-level ord() shim.
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        if not isinstance(c, int):
            c = ord(c)  # Python 2: 1-char str -> int
        long_value += (256 ** i) * c

    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result

    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        if c == 0 or c == '\0':  # Py3 int byte / Py2 str byte
            nPad += 1
        else:
            break
    return (__b58chars[0] * nPad) + result
def b58decode(v, length=None):
    """ decode v into a string of len bytes

    Returns None when *length* is given and the decoded payload has a
    different length. Improvement: the base-256 digits are assembled in a
    bytearray, which works identically on Python 2 (returns str) and
    Python 3 (returns bytes) instead of depending on the module-level
    chr() shim being in scope.

    NOTE(review): characters outside the base58 alphabet make
    __b58chars.find(c) return -1 and silently corrupt the value, exactly
    as in the original -- callers are expected to pass valid input.
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base ** i)

    # Emit base-256 digits, most significant first.
    digits = bytearray()
    while long_value >= 256:
        long_value, mod = divmod(long_value, 256)
        digits.insert(0, mod)
    digits.insert(0, long_value)

    # Leading '1' characters map back to leading zero bytes.
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break
    result = bytes(bytearray(nPad)) + bytes(digits)

    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return 32-bit checksum based on SHA256.

    First 4 bytes of double-SHA256(v), the Base58Check checksum. Calls
    hashlib directly instead of going through the SHA256 compatibility
    wrapper class -- same hash, one less indirection.
    """
    return hashlib.sha256(hashlib.sha256(v).digest()).digest()[0:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    # Base58Check: append the 4-byte double-SHA256 checksum, then encode.
    return b58encode(v + checksum(v))
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum

    Returns the payload without its 4-byte checksum, or None if decoding
    fails or the checksum does not match. Fix: the original computed the
    checksum into an unused local (h3) and then recomputed it in the
    comparison -- the dead double-SHA256 pass is removed.
    """
    result = b58decode(v)
    if result is None:
        return None
    if result[-4:] == checksum(result[:-4]):
        return result[:-4]
    else:
        return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    addr = b58decode_chk(strAddress)
    # A valid payload is exactly 21 bytes: 1 version byte + 20-byte hash160.
    if addr is None or len(addr)!=21: return None
    version = addr[0]
    # On Python 3 addr[0] is already an int and the module-level ord()
    # shim is a no-op; on Python 2 it converts the 1-char str.
    return ord(version)
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # Fix: compare the integer version with == rather than "is"; identity
    # comparison with a literal relies on CPython's small-int caching and
    # raises a SyntaxWarning on modern Python.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
MarcusJones/py_ExergyUtilities | SenegalUtilities/senegal_process_excel.py | 2 | 11595 |
# -*- coding: utf-8 -*-
import copy
import csv
import logging
import os
import utility_excel_api as util_xl
from collections import defaultdict
from tables.hdf5extension import VLArray
from util_pretty_print import print_table
#from ExergyUtilities.senegal_process_excel import get_village_row_dict
logging.basicConfig(level=logging.DEBUG)

# Excel table access via COM, see:
# http://www.jkp-ads.com/Articles/Excel2007TablesVBA.asp
# https://msdn.microsoft.com/en-us/library/office/jj231257.aspx

# Input workbook holding the village list and the cluster assignments.
path_dir = r"C:\CesCloud Senegal PV\03 Working\Liste des villages"
name_file = r"UE List_Finale r28.xlsx"
full_path = os.path.join(path_dir,name_file)
def get_clusters_dict(xl):
    """
    Return a dictionary of cluster_number : [village names]

    Reads the two-column 'tab_Clusters' table on the 'Clusters' sheet.
    """
    table_clusters = xl.get_table_literal("Clusters", "tab_Clusters")
    dict_cluster = defaultdict(list)
    for i,row in enumerate(table_clusters.ListRows):
        # ListRows is 1-indexed in the Excel COM API, hence i + 1.
        # Each row Range unpacks into its two cells: (cluster, village).
        cluster, village = (table_clusters.ListRows(i+1).Range)
        dict_cluster[int(cluster)].append(village.__str__())
    return dict_cluster
def cluster_num(clusters_dict, village_name):
    # TODO: unimplemented stub -- never called; kept as a placeholder for
    # a reverse lookup (village name -> cluster number).
    pass
def get_column_index_dict(table):
    """Map each column header of *table* to its 1-based Excel column index."""
    return {str(column): position
            for position, column in enumerate(table.ListColumns, start=1)}
def get_village_row_dict(table_villages):
    """Map village name -> 1-based Excel row index in the villages table."""
    header_cols = get_column_index_dict(table_villages)
    name_col = header_cols["VILLAGE"]

    name_to_row = {}
    # ListRows is 1-indexed in the Excel COM API.
    for xl_row, _row in enumerate(table_villages.ListRows, start=1):
        village = str(table_villages.ListRows(xl_row).Range(name_col))
        name_to_row[village] = xl_row
    return name_to_row
def get_village_row(table_villages, village_name):
    """Return the 0-based enumeration index of *village_name* in the table.

    DEPRECATED: superseded by get_village_row_dict(), which builds the whole
    mapping in one pass instead of scanning per lookup. NOTE(review): this
    returns the 0-based index i, while get_village_row_dict() stores the
    1-based Excel index i + 1 -- callers must not mix the two.

    Raises KeyError if the village is not present.
    """
    # Which column holds the village name?
    col_dict = get_column_index_dict(table_villages)
    name_row_idx = col_dict["VILLAGE"]
    # Linear scan of the table rows (ListRows is 1-indexed).
    for i, row in enumerate(table_villages.ListRows):
        this_name = table_villages.ListRows(i + 1).Range(name_row_idx)
        this_name = this_name.__str__()
        if this_name == village_name:
            logging.debug("Match {} {} {}".format(this_name, village_name, i))
            return i
    raise KeyError("{} not in list".format(village_name))
def number_clusters(dict_clusters,village_row_dict,data_headers,table_villages):
    # Write the cluster number to the appropriate column in Excel for each village
    print(dict_clusters)
    for k in dict_clusters:
        cluster_num = k
        villages_in_cluster = dict_clusters[k]
        # Check data match between clusters and village names
        for village in villages_in_cluster:
            if not village in village_row_dict:
                raise KeyError("{} not in list".format(village))

            tgt_row = village_row_dict[village]
            tgt_col = data_headers["CLUSTER_NUMBER"]

            # ListRows(row).Range(1, col) addresses a single cell of the table.
            table_villages.ListRows(tgt_row).Range(1,tgt_col).Value = cluster_num
            logging.info("Wrote cluster {} to village {} [{},{}]".format(cluster_num,village,tgt_row,tgt_col))
def average_for_coords(coord_list):
    """Arithmetic mean of a list of coordinates; None for an empty list.

    Fix: guard the empty case. process_column() collects only truthy cell
    values, so a cluster column with no values previously crashed with
    ZeroDivisionError here. Returning None leaves the target cell blank,
    matching the default used for unset columns in group_clusters().
    """
    if not coord_list:
        return None
    return sum(coord_list) / len(coord_list)
def process_column(rows, tgt_col, new_row, function):
    """
    Fill new_row[tgt_col - 1] with *function* applied to the truthy cell
    values found in column *tgt_col* of the given Range rows, and return
    the (mutated) new_row.

    NOTE(review): truthiness filtering also drops legitimate 0 values --
    confirm that is intended for the numeric columns.
    """
    values = [row(1, tgt_col).Value for row in rows if row(1, tgt_col).Value]
    # Excel columns are 1-based; the python list is 0-based.
    new_row[tgt_col - 1] = function(values)
    return new_row
def group_clusters(table_villages):
    """Aggregate clustered villages into one synthetic row per cluster and
    insert it at the top of the villages table.

    Coordinates are averaged (center of mass), counts are summed, and
    administrative fields are copied from the last village of the cluster.
    """
    col_headers = get_column_index_dict(table_villages)
    col_headers_list = table_villages.ListColumns

    # Bucket row Ranges by their CLUSTER_NUMBER cell (rows without a
    # cluster number are skipped).
    cluster_rows = defaultdict(list)
    for i,row_obj in enumerate(table_villages.ListRows):
        idx_xl = i + 1
        this_row = table_villages.ListRows(idx_xl).Range
        cluster_num = table_villages.ListRows(idx_xl).Range(col_headers["CLUSTER_NUMBER"]).Value
        if cluster_num:
            cluster_num = int(cluster_num)
            cluster_rows[cluster_num].append(this_row)

    for k in cluster_rows:
        number_villages = len(cluster_rows[k])
        logging.info("Processing cluster {} with {} villages".format(k,number_villages))
        village_names = list()
        for row in cluster_rows[k]:
            village_names.append(str(row[col_headers["VILLAGE"] -1 ] ))

        # Synthetic row, same width as a table row; unset columns stay None.
        # NOTE: `row` deliberately leaks from the loop above and is reused
        # below to copy the administrative fields of the LAST village.
        new_row = [None for i in range(len(row))]
        new_row[col_headers["CLUSTERED VILLAGES"] -1 ] = ", ".join(village_names)
        new_row[col_headers["VILLAGE"] -1 ] = "Cluster {} with {} villages".format(k,number_villages)

        #---Calculate center of mass
        new_row = process_column(cluster_rows[k], col_headers["X_COORD"], new_row, average_for_coords)
        new_row = process_column(cluster_rows[k], col_headers["Y_COORD"], new_row, average_for_coords)
        # Population
        new_row = process_column(cluster_rows[k], col_headers["POPULATION_ACTUELLE"], new_row, sum)
        # Menage
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE MENAGE"], new_row, sum)
        # Concession
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE CONCESSION"], new_row, sum)
        # Remaining facility counts (note the trailing space in the
        # 'BOUTIQUE ' header -- it must match the workbook exactly).
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE BOUTIQUE "], new_row, sum)
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE D'ATELIER"], new_row, sum)
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE FORAGE/ POMPAGE MANUEL"], new_row, sum)
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE D'ECOLE"], new_row, sum)
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE CASE DE SANTE"], new_row, sum)
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE MOSQUEE"], new_row, sum)
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE MARCHE"], new_row, sum)
        new_row = process_column(cluster_rows[k], col_headers["NOMBRE DE MOULIN"], new_row, sum)

        new_row[col_headers["COUNT CLUSTERED"] -1 ] = number_villages
        # Apply from previous row
        new_row[col_headers["COMMUNE"] -1] = row(1,col_headers["COMMUNE"])
        new_row[col_headers["DEPARTEMENT"] -1 ] = row(1,col_headers["DEPARTEMENT"])
        new_row[col_headers["REGION"] -1 ] = row(1,col_headers["REGION"])

        print_table([col_headers_list,new_row])

        # Insert the aggregate row at position 1 and copy the values in
        # cell by cell (Excel indices are 1-based).
        insert_row = table_villages.ListRows.Add(1)
        for i,item in enumerate(insert_row.Range.Value[0]):
            xl_index = i +1
            insert_row.Range(1,xl_index).Value = new_row[i]
def apply_region_numbers(table_villages):
    """Classify each village row into region group 1 or 2 by DEPARTEMENT.

    WORK IN PROGRESS: currently only prints the group; nothing is written
    back to the workbook yet.
    """
    col_headers = get_column_index_dict(table_villages)
    col_headers_list = table_villages.ListColumns

    # NOTE(review): despite the local name `region`, the cell read below is
    # the DEPARTEMENT column, and these tuples list departement names.
    selection1 = "MATAM","KANEL","RANEROU FERLO"
    selection2 = "GOUDIRY","BAKEL"

    for i,row_obj in enumerate(table_villages.ListRows):
        idx_xl = i + 1
        this_row = table_villages.ListRows(idx_xl).Range
        region = table_villages.ListRows(idx_xl).Range(col_headers["DEPARTEMENT"]).Value
        print(region)
        if region in selection1:
            # NOTE(review): this statement reads the cell and discards the
            # result -- it has no effect; presumably a .Value assignment
            # was intended here.
            table_villages.ListRows(idx_xl).Range(col_headers["DEPARTEMENT"]).Value
            print("1")
        elif region in selection2:
            print("2")
        else:
            print(this_row)
            # Bare raise outside an except block: aborts on any
            # unclassified departement (itself raises RuntimeError).
            raise
#
# for row in table_villages.Range:
# print(row)
#
# # Check data match between clusters and village names
# for village in villages_in_cluster:
# if not village in village_row_dict:
# raise KeyError("{} not in list".format(village))
#
# tgt_row = village_row_dict[village]
# tgt_col = data_headers["CLUSTER_NUMBER"]
#
# table_villages.ListRows(tgt_row).Range(1,tgt_col).Value = cluster_num
# logging.info("Wrote cluster {} to village {} [{},{}]".format(cluster_num,village,tgt_row,tgt_col))
def main():
    """Open the workbook and run the processing pipeline.

    The clustering steps are currently disabled with `if 0:` guards; only
    apply_region_numbers() runs (dict_clusters / village_row_dict are then
    computed but unused).
    """
    print(full_path)
    with util_xl.ExtendedExcelBookAPI(full_path) as xl:
        print(xl)
        print(xl.get_sheet_names())

        # Get clusters
        dict_clusters = get_clusters_dict(xl)

        # Get villages table
        table_villages = xl.get_table_literal("Villages", "tab_Villages")
        data_headers = get_column_index_dict(table_villages)

        # Get village name : rows
        village_row_dict = get_village_row_dict(table_villages)

        # Disabled pipeline steps (flip to `if 1:` to re-enable).
        if 0:
            number_clusters(dict_clusters,village_row_dict,data_headers,table_villages)
        if 0:
            group_clusters(table_villages)

        apply_region_numbers(table_villages)
if __name__ == "__main__":
main()
#---- OLD
#
# #print(eastings)
# avg_easting = sum(eastings)/number_villages
# #print(avg_eastings)
#
# #print(northings)
# avg_northing = sum(northings)/number_villages
# #print(avg_northings)
#
# #---Sum population
# total_pop = sum(population)
# print(total_pop)
#
# #---Sum menage
# total_menage = sum(menage)
# print(total_menage)
#
# #---Sum concession
# #print(conncession)
# total_conncession = sum(conncession)
# print(total_conncession)
#
# # Collect coordinates
# tgt_col = col_headers["X_COORD"]
# eastings.append(row(1,tgt_col).Value)
# tgt_col = col_headers["Y_COORD"]
# northings.append(row(1,tgt_col).Value)
#
# # Collect Population
# tgt_col = col_headers["POPULATION_ACTUELLE"]
# population.append(row(1,tgt_col).Value)
#
# # Collect Menage
# tgt_col = col_headers["NOMBRE DE MENAGE"]
# this_menage = row(1,tgt_col).Value
# menage.append(this_menage)
#
# # Collect conncession
# tgt_col = col_headers["NOMBRE DE CONCESSION"]
# this_conncession = row(1,tgt_col).Value
# if this_conncession:
# conncession.append(this_conncession) | lgpl-3.0 |
geheimnis/core-commands | cryptoalgo/hash/md5.py | 1 | 9805 | """
/* MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
*/
/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
rights reserved.
License to copy and use this software is granted provided that it
is identified as the "RSA Data Security, Inc. MD5 Message-Digest
Algorithm" in all material mentioning or referencing this software
or this function.
License is also granted to make and use derivative works provided
that such works are identified as "derived from the RSA Data
Security, Inc. MD5 Message-Digest Algorithm" in all material
mentioning or referencing the derived work.
RSA Data Security, Inc. makes no representations concerning either
the merchantability of this software or the suitability of this
software for any particular purpose. It is provided "as is"
without express or implied warranty of any kind.
These notices must be retained in any copies of any part of this
documentation and/or software.
*/
"""
#/* Constants for MD5Transform routine.
# S11..S44 are the per-round left-rotation amounts from RFC 1321:
# S1x is used in round 1, S2x in round 2, S3x in round 3, S4x in round 4.
S11 = 7
S12 = 12
S13 = 17
S14 = 22
S21 = 5
S22 = 9
S23 = 14
S24 = 20
S31 = 4
S32 = 11
S33 = 16
S34 = 23
S41 = 6
S42 = 10
S43 = 15
S44 = 21

# Maximum padding block: one 0x80 marker byte followed by 63 zero bytes;
# final() slices off only the prefix it needs.
PADDING = "\x80" + 63*"\0" # do not overlook first byte again :-)
# F, G, H and I are the four basic MD5 round functions (RFC 1321).
def F(x, y, z):
    """Round-1 mixer: bitwise "if x then y else z"."""
    return (x & y) | (~x & z)

def G(x, y, z):
    """Round-2 mixer: bitwise "if z then x else y"."""
    return (x & z) | (y & ~z)

def H(x, y, z):
    """Round-3 mixer: parity of the three words."""
    return x ^ y ^ z

def I(x, y, z):
    """Round-4 mixer."""
    return y ^ (x | ~z)
#/* ROTATE_LEFT rotates x left n bits.
def ROTATE_LEFT(x, n):
    """Rotate the 32-bit word *x* left by *n* bits and return the result.

    The mask makes the right shift behave like an unsigned shift even if
    *x* arrived with high bits set from unmasked additions.
    """
    # Plain int literals auto-promote on Python 2; the historical `L`
    # suffix was Python-2-only syntax and is removed here.
    x = x & 0xffffffff # make shift unsigned
    return (((x) << (n)) | ((x) >> (32-(n)))) & 0xffffffff
#/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
# Each mixes one message word *x* and sine-table constant *ac* into the
# word *a*, rotates by *s*, then adds *b*.  The caller must assign the
# return value back to *a*.
def FF(a, b, c, d, x, s, ac):
    return ROTATE_LEFT(a + F(b, c, d) + x + ac, s) + b

def GG(a, b, c, d, x, s, ac):
    return ROTATE_LEFT(a + G(b, c, d) + x + ac, s) + b

def HH(a, b, c, d, x, s, ac):
    return ROTATE_LEFT(a + H(b, c, d) + x + ac, s) + b

def II(a, b, c, d, x, s, ac):
    return ROTATE_LEFT(a + I(b, c, d) + x + ac, s) + b
class md5:
    """Pure-Python MD5 context (direct translation of the RSA MD5C.C code).

    NOTE(review): this class uses Python 2 long literals (0L, 0x...L) and
    str buffers throughout, so it only runs under Python 2; the module
    falls back to it only when hashlib is unavailable.
    """
    def __init__(self, initial=None):
        # count: total message length in bits so far.
        self.count = 0L
        # state: the four chaining words A, B, C, D (RFC 1321 init values).
        self.state = (0x67452301L,
                      0xefcdab89L,
                      0x98badcfeL,
                      0x10325476L,)
        # buffer: partial (< 64 byte) input block awaiting more data.
        self.buffer = ""
        if initial:
            self.update(initial)

    ##/* MD5 block update operation. Continues an MD5 message-digest
    ##  operation, processing another message block, and updating the
    ##  context.
    ## */
    ## /* Compute number of bytes mod 64 */
    def update(self, input):
        """Feed *input* (a byte string) into the digest state."""
        inputLen = len(input)
        ## index = (unsigned int)((context->count[0] >> 3) & 0x3F);
        index = int(self.count >> 3) & 0x3F
        ## /* Update number of bits */
        self.count = self.count + (inputLen << 3)
        ## partLen = 64 - index;
        partLen = 64 - index
        ## /* Transform as many times as possible.
        if inputLen >= partLen:
            # Complete the buffered partial block first, then consume
            # whole 64-byte blocks directly from the input.
            self.buffer = self.buffer[:index] + input[:partLen]
            self.transform(self.buffer)
            i = partLen
            while i + 63 < inputLen:
                self.transform(input[i:i+64])
                i = i + 64
            index = 0
        else:
            i = 0
        ## /* Buffer remaining input */
        self.buffer = self.buffer[:index] + input[i:inputLen]

    ##/* MD5 finalization. Ends an MD5 message-digest operation, writing the
    ##  the message digest and zeroizing the context.
    ## */
    def final(self):
        """Finish the digest and return it; the context is wiped after."""
        ## /* Save number of bits */
        bits = Encode((self.count & 0xffffffffL, self.count>>32), 8)
        ## /* Pad out to 56 mod 64.
        index = int((self.count >> 3) & 0x3f)
        if index < 56:
            padLen = (56 - index)
        else:
            padLen = (120 - index)
        self.update(PADDING[:padLen])
        ## /* Append length (before padding) */
        self.update(bits)
        ## /* Store state in digest */
        digest = Encode(self.state, 16)
        ## /* Zeroize sensitive information.
        # Wipes state/buffer/count; the object is unusable afterwards.
        self.__dict__.clear()
        return digest
    digest = final # alias

    ##/* MD5 basic transformation. Transforms state based on block.
    ## */
    def transform(self, block):
        """Run the 64-step MD5 compression on one 64-byte *block*."""
        a, b, c, d = state = self.state
        x = Decode(block, 64)
        ## /* Round 1 */
        a = FF (a, b, c, d, x[ 0], S11, 0xd76aa478)#; /* 1 */
        d = FF (d, a, b, c, x[ 1], S12, 0xe8c7b756)#; /* 2 */
        c = FF (c, d, a, b, x[ 2], S13, 0x242070db)#; /* 3 */
        b = FF (b, c, d, a, x[ 3], S14, 0xc1bdceee)#; /* 4 */
        a = FF (a, b, c, d, x[ 4], S11, 0xf57c0faf)#; /* 5 */
        d = FF (d, a, b, c, x[ 5], S12, 0x4787c62a)#; /* 6 */
        c = FF (c, d, a, b, x[ 6], S13, 0xa8304613)#; /* 7 */
        b = FF (b, c, d, a, x[ 7], S14, 0xfd469501)#; /* 8 */
        a = FF (a, b, c, d, x[ 8], S11, 0x698098d8)#; /* 9 */
        d = FF (d, a, b, c, x[ 9], S12, 0x8b44f7af)#; /* 10 */
        c = FF (c, d, a, b, x[10], S13, 0xffff5bb1)#; /* 11 */
        b = FF (b, c, d, a, x[11], S14, 0x895cd7be)#; /* 12 */
        a = FF (a, b, c, d, x[12], S11, 0x6b901122)#; /* 13 */
        d = FF (d, a, b, c, x[13], S12, 0xfd987193)#; /* 14 */
        c = FF (c, d, a, b, x[14], S13, 0xa679438e)#; /* 15 */
        b = FF (b, c, d, a, x[15], S14, 0x49b40821)#; /* 16 */
        ## /* Round 2 */
        a = GG (a, b, c, d, x[ 1], S21, 0xf61e2562)#; /* 17 */
        d = GG (d, a, b, c, x[ 6], S22, 0xc040b340)#; /* 18 */
        c = GG (c, d, a, b, x[11], S23, 0x265e5a51)#; /* 19 */
        b = GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa)#; /* 20 */
        a = GG (a, b, c, d, x[ 5], S21, 0xd62f105d)#; /* 21 */
        d = GG (d, a, b, c, x[10], S22, 0x2441453)#; /* 22 */
        c = GG (c, d, a, b, x[15], S23, 0xd8a1e681)#; /* 23 */
        b = GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8)#; /* 24 */
        a = GG (a, b, c, d, x[ 9], S21, 0x21e1cde6)#; /* 25 */
        d = GG (d, a, b, c, x[14], S22, 0xc33707d6)#; /* 26 */
        c = GG (c, d, a, b, x[ 3], S23, 0xf4d50d87)#; /* 27 */
        b = GG (b, c, d, a, x[ 8], S24, 0x455a14ed)#; /* 28 */
        a = GG (a, b, c, d, x[13], S21, 0xa9e3e905)#; /* 29 */
        d = GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8)#; /* 30 */
        c = GG (c, d, a, b, x[ 7], S23, 0x676f02d9)#; /* 31 */
        b = GG (b, c, d, a, x[12], S24, 0x8d2a4c8a)#; /* 32 */
        ## /* Round 3 */
        a = HH (a, b, c, d, x[ 5], S31, 0xfffa3942)#; /* 33 */
        d = HH (d, a, b, c, x[ 8], S32, 0x8771f681)#; /* 34 */
        c = HH (c, d, a, b, x[11], S33, 0x6d9d6122)#; /* 35 */
        b = HH (b, c, d, a, x[14], S34, 0xfde5380c)#; /* 36 */
        a = HH (a, b, c, d, x[ 1], S31, 0xa4beea44)#; /* 37 */
        d = HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9)#; /* 38 */
        c = HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60)#; /* 39 */
        b = HH (b, c, d, a, x[10], S34, 0xbebfbc70)#; /* 40 */
        a = HH (a, b, c, d, x[13], S31, 0x289b7ec6)#; /* 41 */
        d = HH (d, a, b, c, x[ 0], S32, 0xeaa127fa)#; /* 42 */
        c = HH (c, d, a, b, x[ 3], S33, 0xd4ef3085)#; /* 43 */
        b = HH (b, c, d, a, x[ 6], S34, 0x4881d05)#; /* 44 */
        a = HH (a, b, c, d, x[ 9], S31, 0xd9d4d039)#; /* 45 */
        d = HH (d, a, b, c, x[12], S32, 0xe6db99e5)#; /* 46 */
        c = HH (c, d, a, b, x[15], S33, 0x1fa27cf8)#; /* 47 */
        b = HH (b, c, d, a, x[ 2], S34, 0xc4ac5665)#; /* 48 */
        ## /* Round 4 */
        a = II (a, b, c, d, x[ 0], S41, 0xf4292244)#; /* 49 */
        d = II (d, a, b, c, x[ 7], S42, 0x432aff97)#; /* 50 */
        c = II (c, d, a, b, x[14], S43, 0xab9423a7)#; /* 51 */
        b = II (b, c, d, a, x[ 5], S44, 0xfc93a039)#; /* 52 */
        a = II (a, b, c, d, x[12], S41, 0x655b59c3)#; /* 53 */
        d = II (d, a, b, c, x[ 3], S42, 0x8f0ccc92)#; /* 54 */
        c = II (c, d, a, b, x[10], S43, 0xffeff47d)#; /* 55 */
        b = II (b, c, d, a, x[ 1], S44, 0x85845dd1)#; /* 56 */
        a = II (a, b, c, d, x[ 8], S41, 0x6fa87e4f)#; /* 57 */
        d = II (d, a, b, c, x[15], S42, 0xfe2ce6e0)#; /* 58 */
        c = II (c, d, a, b, x[ 6], S43, 0xa3014314)#; /* 59 */
        b = II (b, c, d, a, x[13], S44, 0x4e0811a1)#; /* 60 */
        a = II (a, b, c, d, x[ 4], S41, 0xf7537e82)#; /* 61 */
        d = II (d, a, b, c, x[11], S42, 0xbd3af235)#; /* 62 */
        c = II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb)#; /* 63 */
        b = II (b, c, d, a, x[ 9], S44, 0xeb86d391)#; /* 64 */
        # Add this block's result back into the chaining state, mod 2**32.
        self.state = (0xffffffffL & (state[0] + a),
                      0xffffffffL & (state[1] + b),
                      0xffffffffL & (state[2] + c),
                      0xffffffffL & (state[3] + d),)
        ## /* Zeroize sensitive information.
        del x
import struct, string
def Encode(input, len):
    """Serialise the first len//4 32-bit words of *input* to a byte string.

    Uses native byte order ("%iI"), exactly as the original RSA
    translation did.
    """
    k = len >> 2
    # struct.pack already returns the fully joined string/bytes; the
    # historical apply()/string.join() combination was redundant and both
    # functions were removed in Python 3.
    return struct.pack("%iI" % k, *input[:k])
def Decode(input, len):
    """Deserialise *len* bytes of *input* into a list of len//4 32-bit words
    (native byte order)."""
    word_count = len >> 2
    return list(struct.unpack("%iI" % word_count, input[:len]))
"""
The above is a slightly modified work from the RSA Data Security, Inc. MD5
Message-Digest Algorithm.
Now begin our interface.
"""
# Prefer the C-accelerated hashlib implementation; fall back to the
# pure-Python md5 class defined above only when hashlib is unavailable.
try:
    import hashlib
    md5class = hashlib.md5
except ImportError:
    # Narrowed from a bare `except:`, which would also have masked
    # unrelated failures (KeyboardInterrupt, attribute errors, ...).
    md5class = md5
class hash_class:
    """Uniform hash-algorithm interface wrapping MD5."""

    def __init__(self):
        # No per-instance state: the class is a stateless adapter.
        pass

    def get_name(self):
        """Algorithm identifier string."""
        return 'MD5'

    def get_output_size(self):
        """Digest length in bits."""
        return 128

    def get_block_size(self):
        """Compression-function input block size in bits."""
        return 512

    def hash(self, text):
        """Return the raw MD5 digest of *text*."""
        return md5class(text).digest()
if __name__ == '__main__':
    # Smoke test: MD5 of the empty string.  NOTE(review):
    # str.encode('hex') is Python-2-only; under Python 3 this would need
    # bytes.hex() / binascii.hexlify.
    assert hash_class().hash('').encode('hex') == \
        'd41d8cd98f00b204e9800998ecf8427e'
| gpl-3.0 |
JianyuWang/nova | nova/tests/unit/api/ec2/test_ec2utils.py | 84 | 2549 | # Copyright 2014 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.ec2 import ec2utils
from nova import context
from nova import objects
from nova import test
class EC2UtilsTestCase(test.TestCase):
    """Tests for the EC2 integer-ID <-> UUID mapping helpers in ec2utils."""

    def setUp(self):
        self.ctxt = context.get_admin_context()
        # ec2utils caches id lookups at module level; reset so each test
        # starts from a clean cache.
        ec2utils.reset_cache()
        super(EC2UtilsTestCase, self).setUp()

    def test_get_int_id_from_snapshot_uuid(self):
        # An existing mapping row should be found and its id returned.
        smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
        smap.create()
        smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
                                                         'fake-uuid')
        self.assertEqual(smap.id, smap_id)

    def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
        # With no pre-existing row, the helper creates the mapping itself.
        smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
                                                         'fake-uuid')
        smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
        self.assertEqual('fake-uuid', smap.uuid)

    def test_get_snapshot_uuid_from_int_id(self):
        smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
        smap.create()
        smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
        self.assertEqual(smap.uuid, smap_uuid)

    def test_id_to_glance_id(self):
        s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
        s3imap.create()
        uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
        self.assertEqual(uuid, s3imap.uuid)

    def test_glance_id_to_id(self):
        s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
        s3imap.create()
        s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
        self.assertEqual(s3imap_id, s3imap.id)

    def test_glance_id_to_id_creates_mapping(self):
        # As with snapshots, an unknown glance uuid gets a new mapping row.
        s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
        s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
        self.assertEqual('fake-uuid', s3imap.uuid)
| apache-2.0 |
Z-KO/url-expanderscanner-bot | botmain.py | 2 | 1380 | import socket
import re
import simplejson
import urllib
import urllib2
import urlclean
# VirusTotal URL-report endpoint used for every scanned link.
url = "https://www.virustotal.com/vtapi/v2/url/report"
# IRC connection parameters.  NOTE(review): these are placeholders and
# must be set before running the bot.
server = "server"
channel = "#channel"
botnick = "botnick"
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((server, 6667)) #connect to server using 6667
ircsock.send(u'NICK %s\n' % (botnick))#user auth
ircsock.send(u'USER %s bla %s :%s\n' % (botnick, server, botnick))
def ping():
    """Answer a server PING to keep the connection alive."""
    ircsock.send("PONG :pong\n")

def sendmsg(chan, msg):
    """Send *msg* as a PRIVMSG to channel *chan*."""
    ircsock.send("PRIVMSG "+chan+" :" + msg +"\n")

def joinchan(chan):
    """Join the IRC channel *chan*."""
    ircsock.send("JOIN " + chan +"\n")
def extracturl(msg):
    """Return the expanded URL found in a PRIVMSG line, or False.

    Returns False both when *msg* is not a PRIVMSG and when it contains no
    http(s) URL; otherwise the first URL found is passed through
    urlclean.unshorten (presumably to resolve URL shorteners — confirm
    against the urlclean module).
    """
    if "PRIVMSG" not in msg:
        return False
    url = re.search("(?P<url>https?://[^\s]+)", msg)
    if url is not None:
        link = url.group("url")
        return urlclean.unshorten(link)
    else:
        return False
joinchan(channel)
# Main event loop: read raw IRC traffic, extract any URL from PRIVMSGs,
# and submit it to the VirusTotal URL-report API.
while True:
    ircmsg = ircsock.recv(2048)
    ircmsg = ircmsg.strip('\n\r')
    if extracturl(ircmsg) != False:
        # NOTE(review): extracturl() runs twice per message (once in the
        # test above, once here), doubling the unshorten work.
        link = extracturl(ircmsg)
        parameters = {"resource": link,
                      "apikey": "virus total api key"}
        data = urllib.urlencode(parameters)
        req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)
        json = response.read()
        response_dict = simplejson.loads(json)
        # NOTE(review): `positives` is extracted but never reported back
        # to the channel — presumably a sendmsg() call is missing here.
        positives = response_dict.get('positives')
    print(ircmsg)
    if ircmsg.find("PING :") != -1: #respond to server pings
        ping()
| mit |
y-higuchi/ramcloud | systemtests/run.py | 18 | 2525 | #!/usr/bin/env python
# Copyright (c) 2012 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Runs RAMCloud system tests. Should be run from the top-level directory
of the RAMCloud source tree.
Notice these tests run on multiple machines throughout the cluster which
has a few implcations. First, you'll need a passwordless ssh logins to
work for your user before you can run these. Second, you'll need ssh
master mode configured to run them reliably.
See https://ramcloud.stanford.edu/wiki/display/ramcloud/Running+Recoveries+with+recovery.py
Finally, they take a long time to run; just starting and stopping the processes
may take several seconds for each test.
Tests use config.py/localconfig.py to find a list of hosts where the tests
should run. Make sure you've coordinated with anyone else who might be
using the machines you have listed there before running these tests.
This runner attempts to automatically discover tests. It imports
all the modules in the directory and looks for a 'suite' field. If it exists
the tests in the suite are added and run.
"""
from __future__ import division, print_function
import sys
import os
suites = []
# Auto-discover test modules: import every .py file in this directory
# (except this runner and the package marker) and collect its
# module-level `suite` attribute, if it defines one.
for module in os.listdir(os.path.dirname(__file__)):
    if module == 'run.py' or module == '__init__.py' or module[-3:] != '.py':
        continue
    module_name = module[:-3]
    __import__(module_name, locals(), globals())
    try:
        # Depending on how we were invoked the module registers under the
        # package-qualified name or the bare name; accept either.
        mod = sys.modules['systemtests.%s' % module_name]
    except KeyError:
        mod = sys.modules[module_name]
    try:
        suites.append(mod.suite)
    except AttributeError:
        print('No test suite found in %s' % module_name)
    else:
        print('Adding tests from %s' % module_name)

import unittest
# Aggregate every discovered suite and run them all when invoked directly.
alltests = unittest.TestSuite(suites)
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(alltests)
| isc |
donckers/ansible | lib/ansible/galaxy/role.py | 6 | 12494 | ########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
    """An installable Ansible Galaxy role: metadata access, download,
    install and removal.

    A role is identified by name/src/version/scm and lives under one of
    the configured roles paths.
    """

    SUPPORTED_SCMS = set(['git', 'hg'])
    META_MAIN = os.path.join('meta', 'main.yml')
    META_INSTALL = os.path.join('meta', '.galaxy_install_info')
    ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')

    def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
        self._metadata = None
        self._install_info = None
        self._validate_certs = not C.GALAXY_IGNORE_CERTS

        # set validate_certs
        if galaxy.options.ignore_certs:
            self._validate_certs = False
        display.vvv('Validate TLS certificates: %s' % self._validate_certs)

        self.options = galaxy.options
        self.galaxy = galaxy

        self.name = name
        self.version = version
        self.src = src or name
        self.scm = scm

        if path is not None:
            # Only append the role name if the caller's path doesn't
            # already mention it.
            if self.name not in path:
                path = os.path.join(path, self.name)
            self.path = path
        else:
            # Prefer an existing checkout in any configured roles path.
            for role_path_dir in galaxy.roles_paths:
                role_path = os.path.join(role_path_dir, self.name)
                if os.path.exists(role_path):
                    self.path = role_path
                    break
            else:
                # use the first path by default
                self.path = os.path.join(galaxy.roles_paths[0], self.name)

    def __eq__(self, other):
        # Roles are compared by name only, regardless of version or path.
        return self.name == other.name

    @property
    def metadata(self):
        """
        Returns role metadata

        Lazily loaded from meta/main.yml; returns False when the file
        exists but cannot be parsed.
        """
        if self._metadata is None:
            meta_path = os.path.join(self.path, self.META_MAIN)
            if os.path.isfile(meta_path):
                try:
                    f = open(meta_path, 'r')
                    self._metadata = yaml.safe_load(f)
                except:
                    display.vvvvv("Unable to load metadata for %s" % self.name)
                    return False
                finally:
                    f.close()

        return self._metadata

    @property
    def install_info(self):
        """
        Returns role install info

        Lazily loaded from meta/.galaxy_install_info (written at install
        time); returns False when the file exists but cannot be parsed.
        """
        if self._install_info is None:
            info_path = os.path.join(self.path, self.META_INSTALL)
            if os.path.isfile(info_path):
                try:
                    f = open(info_path, 'r')
                    self._install_info = yaml.safe_load(f)
                except:
                    display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                    return False
                finally:
                    f.close()

        return self._install_info

    def _write_galaxy_install_info(self):
        """
        Writes a YAML-formatted file to the role's meta/ directory
        (named .galaxy_install_info) which contains some information
        we can use later for commands like 'list' and 'info'.
        """
        info = dict(
            version=self.version,
            install_date=datetime.datetime.utcnow().strftime("%c"),
        )
        info_path = os.path.join(self.path, self.META_INSTALL)
        with open(info_path, 'w+') as f:
            try:
                self._install_info = yaml.safe_dump(info, f)
            except:
                return False

        return True

    def remove(self):
        """
        Removes the specified role from the roles path.  There is a
        sanity check to make sure there's a meta/main.yml file at this
        path so the user doesn't blow away random directories.
        """
        if self.metadata:
            try:
                rmtree(self.path)
                return True
            except:
                pass

        return False

    def fetch(self, role_data):
        """
        Downloads the archived role from github to a temp location

        Returns the temp file path, or False on any download failure.
        """
        if role_data:
            # first grab the file and save it to a temp location
            if "github_user" in role_data and "github_repo" in role_data:
                archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
            else:
                archive_url = self.src
            display.display("- downloading role from %s" % archive_url)

            try:
                url_file = open_url(archive_url, validate_certs=self._validate_certs)
                temp_file = tempfile.NamedTemporaryFile(delete=False)
                data = url_file.read()
                while data:
                    temp_file.write(data)
                    data = url_file.read()
                temp_file.close()
                return temp_file.name
            except Exception as e:
                display.error("failed to download the file: %s" % str(e))

        return False

    def install(self):
        """Resolve, download and extract the role into self.path.

        Returns True on success, False when no archive could be obtained.
        """
        # the file is a tar, so open it that way and extract it
        # to the specified (or default) roles directory
        if self.scm:
            # create tar file from scm url
            tmp_file = RoleRequirement.scm_archive_role(**self.spec)
        elif self.src:
            if os.path.isfile(self.src):
                # installing a local tar.gz
                tmp_file = self.src
            elif '://' in self.src:
                role_data = self.src
                tmp_file = self.fetch(role_data)
            else:
                # Look the role up on Galaxy and pick/validate a version.
                api = GalaxyAPI(self.galaxy)
                role_data = api.lookup_role_by_name(self.src)
                if not role_data:
                    raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))

                role_versions = api.fetch_role_related('versions', role_data['id'])
                if not self.version:
                    # convert the version names to LooseVersion objects
                    # and sort them to get the latest version. If there
                    # are no versions in the list, we'll grab the head
                    # of the master branch
                    if len(role_versions) > 0:
                        loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
                        loose_versions.sort()
                        self.version = str(loose_versions[-1])
                    elif role_data.get('github_branch', None):
                        self.version = role_data['github_branch']
                    else:
                        self.version = 'master'
                elif self.version != 'master':
                    if role_versions and self.version not in [a.get('name', None) for a in role_versions]:
                        raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))

                tmp_file = self.fetch(role_data)

        else:
            raise AnsibleError("No valid role data found")

        if tmp_file:

            display.debug("installing from %s" % tmp_file)

            if not tarfile.is_tarfile(tmp_file):
                raise AnsibleError("the file downloaded was not a tar.gz")
            else:
                if tmp_file.endswith('.gz'):
                    role_tar_file = tarfile.open(tmp_file, "r:gz")
                else:
                    role_tar_file = tarfile.open(tmp_file, "r")
                # verify the role's meta file
                meta_file = None
                members = role_tar_file.getmembers()
                # next find the metadata file
                for member in members:
                    if self.META_MAIN in member.name:
                        meta_file = member
                        break
                if not meta_file:
                    raise AnsibleError("this role does not appear to have a meta/main.yml file.")
                else:
                    try:
                        self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
                    except:
                        raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")

                # we strip off the top-level directory for all of the files contained within
                # the tar file here, since the default is 'github_repo-target', and change it
                # to the specified role's name
                display.display("- extracting %s to %s" % (self.name, self.path))
                try:
                    if os.path.exists(self.path):
                        if not os.path.isdir(self.path):
                            raise AnsibleError("the specified roles path exists and is not a directory.")
                        elif not getattr(self.options, "force", False):
                            raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
                        else:
                            # using --force, remove the old path
                            if not self.remove():
                                raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path)
                    else:
                        os.makedirs(self.path)

                    # now we do the actual extraction to the path
                    for member in members:
                        # we only extract files, and remove any relative path
                        # bits that might be in the file for security purposes
                        # and drop the leading directory, as mentioned above
                        if member.isreg() or member.issym():
                            parts = member.name.split(os.sep)[1:]
                            final_parts = []
                            for part in parts:
                                # Sanitise path components to avoid
                                # traversal / expansion tricks.
                                if part != '..' and '~' not in part and '$' not in part:
                                    final_parts.append(part)
                            member.name = os.path.join(*final_parts)
                            role_tar_file.extract(member, self.path)

                    # write out the install info file for later use
                    self._write_galaxy_install_info()
                except OSError as e:
                    raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))

                # return the parsed yaml metadata
                display.display("- %s was installed successfully" % self.name)
                try:
                    os.unlink(tmp_file)
                except (OSError,IOError) as e:
                    display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
                return True

        return False

    @property
    def spec(self):
        """
        Returns role spec info
        {
           'scm': 'git',
           'src': 'http://git.example.com/repos/repo.git',
           'version': 'v1.0',
           'name': 'repo'
        }
        """
        return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
| gpl-3.0 |
infoxchange/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/admin/templatetags/admin_list.py | 43 | 12835 | import datetime
from django.conf import settings
from django.contrib.admin.util import lookup_field, display_for_field, label_for_field
from django.contrib.admin.views.main import ALL_VAR, EMPTY_CHANGELIST_VALUE
from django.contrib.admin.views.main import ORDER_VAR, ORDER_TYPE_VAR, PAGE_VAR, SEARCH_VAR
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
register = Library()

# Sentinel used in page ranges to mark an ellipsis ("...") gap.
DOT = '.'

def paginator_number(cl,i):
    """
    Generates an individual page index link in a paginated list.

    *i* is the 0-based page index (or DOT); displayed numbers are 1-based.
    The current page is rendered as a plain span rather than a link.
    """
    if i == DOT:
        return u'... '
    elif i == cl.page_num:
        return mark_safe(u'<span class="this-page">%d</span> ' % (i+1))
    else:
        return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(cl.get_query_string({PAGE_VAR: i})), (i == cl.paginator.num_pages-1 and ' class="end"' or ''), i+1))
paginator_number = register.simple_tag(paginator_number)
def pagination(cl):
    """
    Generates the series of links to the pages in a paginated list.

    Returns the context for the admin/pagination.html inclusion template.
    """
    paginator, page_num = cl.paginator, cl.page_num

    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 3
        ON_ENDS = 2

        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy
        if paginator.num_pages <= 10:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                page_range.extend(range(0, ON_EACH_SIDE - 1))
                page_range.append(DOT)
                page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
            else:
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
                page_range.append(DOT)
                page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages))

    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        'cl': cl,
        'pagination_required': pagination_required,
        'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
        'page_range': page_range,
        'ALL_VAR': ALL_VAR,
        '1': 1,
    }
pagination = register.inclusion_tag('admin/pagination.html')(pagination)
def result_headers(cl):
    """
    Generates the list column headers.

    Yields one dict per list_display column with keys such as "text",
    "sortable", "url" and "class_attrib" for the change-list template.
    """
    lookup_opts = cl.lookup_opts
    for i, field_name in enumerate(cl.list_display):
        header, attr = label_for_field(field_name, cl.model,
            model_admin = cl.model_admin,
            return_attr = True
        )
        if attr:
            # if the field is the action checkbox: no sorting and special class
            if field_name == 'action_checkbox':
                yield {
                    "text": header,
                    "class_attrib": mark_safe(' class="action-checkbox-column"')
                }
                continue

            # It is a non-field, but perhaps one that is sortable
            admin_order_field = getattr(attr, "admin_order_field", None)
            if not admin_order_field:
                yield {"text": header}
                continue

            # So this _is_ a sortable non-field.  Go to the yield
            # after the else clause.
        else:
            admin_order_field = None

        th_classes = []
        new_order_type = 'asc'
        # Flip the sort direction when the column is already the one sorted on.
        if field_name == cl.order_field or admin_order_field == cl.order_field:
            th_classes.append('sorted %sending' % cl.order_type.lower())
            new_order_type = {'asc': 'desc', 'desc': 'asc'}[cl.order_type.lower()]

        yield {
            "text": header,
            "sortable": True,
            "url": cl.get_query_string({ORDER_VAR: i, ORDER_TYPE_VAR: new_order_type}),
            "class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')
        }
def _boolean_icon(field_val):
    """Return an <img> tag for a boolean value (yes/no/unknown icon)."""
    BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
    return mark_safe(u'<img src="%simg/admin/icon-%s.gif" alt="%s" />' % (settings.ADMIN_MEDIA_PREFIX, BOOLEAN_MAPPING[field_val], field_val))
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.

    Yields one safe HTML <td>/<th> cell per list_display column for the
    given *result* row, plus a hidden-pk cell when an editable *form* is
    supplied.
    """
    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        row_class = ''
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except (AttributeError, ObjectDoesNotExist):
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                # Non-field value (callable / admin method): respect
                # allow_tags and boolean display hints.
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    allow_tags = True
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_unicode(value)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if not allow_tags:
                    result_repr = escape(result_repr)
                else:
                    result_repr = mark_safe(result_repr)
            else:
                if value is None:
                    result_repr = EMPTY_CHANGELIST_VALUE
                if isinstance(f.rel, models.ManyToOneRel):
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = EMPTY_CHANGELIST_VALUE
                    else:
                        result_repr = escape(field_val)
                else:
                    result_repr = display_for_field(value, f)
                if isinstance(f, models.DateField) or isinstance(f, models.TimeField):
                    row_class = ' class="nowrap"'
        if force_unicode(result_repr) == '':
            result_repr = mark_safe(' ')
        # If list_display_links not defined, add the link tag to the first field
        if (first and not cl.list_display_links) or field_name in cl.list_display_links:
            table_tag = {True:'th', False:'td'}[first]
            first = False
            url = cl.url_for_result(result)
            # Convert the pk to something that can be used in Javascript.
            # Problem cases are long ints (23L) and non-ASCII strings.
            if cl.to_field:
                attr = str(cl.to_field)
            else:
                attr = pk
            value = result.serializable_value(attr)
            result_id = repr(force_unicode(value))[1:]
            yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
                (table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
        else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable custom admins
            # can provide fields on a per request basis
            if form and field_name in form.fields:
                bf = form[field_name]
                result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
            else:
                result_repr = conditional_escape(result_repr)
            yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
    if form and not form[cl.model._meta.pk.name].is_hidden:
        yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
def results(cl):
    """Yield one list of rendered cells per result row, pairing each row
    with its formset form when the change list is editable."""
    if cl.formset:
        for res, form in zip(cl.result_list, cl.formset.forms):
            yield list(items_for_result(cl, res, form))
    else:
        for res in cl.result_list:
            yield list(items_for_result(cl, res, None))
def result_hidden_fields(cl):
    """Yield the rendered primary-key field of every formset row whose
    pk widget is hidden (used above the results table)."""
    if not cl.formset:
        return
    pk_name = cl.model._meta.pk.name
    for res, form in zip(cl.result_list, cl.formset.forms):
        pk_field = form[pk_name]
        if pk_field.is_hidden:
            yield mark_safe(force_unicode(pk_field))
def result_list(cl):
    """Build the template context for the change-list results table:
    headers, hidden pk fields and the rendered rows."""
    context = {'cl': cl}
    context['result_hidden_fields'] = list(result_hidden_fields(cl))
    context['result_headers'] = list(result_headers(cl))
    context['results'] = list(results(cl))
    return context
result_list = register.inclusion_tag("admin/change_list_results.html")(result_list)
def date_hierarchy(cl):
    """
    Displays the date hierarchy for date drill-down functionality.

    Depending on which of the year/month/day lookups are present in the
    request, builds a context with a 'back' link one level up and the
    'choices' for the next level down.  Returns None (implicitly) when the
    ModelAdmin defines no date_hierarchy.
    """
    if cl.date_hierarchy:
        field_name = cl.date_hierarchy
        year_field = '%s__year' % field_name
        month_field = '%s__month' % field_name
        day_field = '%s__day' % field_name
        # Prefix matching every hierarchy lookup; passed to
        # get_query_string() so stale lookups are removed from the URL.
        field_generic = '%s__' % field_name
        year_lookup = cl.params.get(year_field)
        month_lookup = cl.params.get(month_field)
        day_lookup = cl.params.get(day_field)
        link = lambda d: cl.get_query_string(d, [field_generic])
        if year_lookup and month_lookup and day_lookup:
            # Deepest level: a single day is selected, no further choices.
            day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup, month_field: month_lookup}),
                    'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
                },
                'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
            }
        elif year_lookup and month_lookup:
            # A month is selected: offer only the days that have records.
            days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup}),
                    'title': str(year_lookup)
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
                    'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
                } for day in days]
            }
        elif year_lookup:
            # A year is selected: offer the months that have records.
            months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
            return {
                'show' : True,
                'back': {
                    'link' : link({}),
                    'title': _('All dates')
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month.month}),
                    'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
                } for month in months]
            }
        else:
            # Nothing selected yet: offer the years that have records.
            years = cl.query_set.dates(field_name, 'year')
            return {
                'show': True,
                'choices': [{
                    'link': link({year_field: str(year.year)}),
                    'title': str(year.year),
                } for year in years]
            }
date_hierarchy = register.inclusion_tag('admin/date_hierarchy.html')(date_hierarchy)
def search_form(cl):
    """Render the change-list search box; the result count is only shown
    when the search actually narrowed the full queryset."""
    narrowed = cl.result_count != cl.full_result_count
    return {
        'cl': cl,
        'show_result_count': narrowed,
        'search_var': SEARCH_VAR,
    }
search_form = register.inclusion_tag('admin/search_form.html')(search_form)
def admin_list_filter(cl, spec):
    """Render one sidebar filter: its title plus its choice links."""
    filter_choices = list(spec.choices(cl))
    return {'title': spec.title(), 'choices': filter_choices}
admin_list_filter = register.inclusion_tag('admin/filter.html')(admin_list_filter)
def admin_actions(context):
    """
    Track the number of times the action field has been rendered on the page,
    so we know which value to use.
    """
    previous_index = context.get('action_index', -1)
    context['action_index'] = previous_index + 1
    return context
# Registered with takes_context=True so the tag can read and mutate the
# page's template context (the action_index counter).
admin_actions = register.inclusion_tag("admin/actions.html", takes_context=True)(admin_actions)
| gpl-3.0 |
is06/navitia | source/tyr/migrations/versions/1fd68e6d0456_.py | 17 | 4088 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
""" init database
Revision ID: 1fd68e6d0456
Revises: None
Create Date: 2014-01-10 16:25:30.432738
"""
# revision identifiers, used by Alembic.
revision = '1fd68e6d0456'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
    """Create the initial tyr schema.

    Parent tables (api, user, instance) are created before the tables
    that hold foreign keys to them (authorization, job, key); finally the
    'api' table is seeded with the catch-all 'ALL' entry.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('api',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('login', sa.Text(), nullable=False),
    sa.Column('email', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('login')
    )
    op.create_table('instance',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    sa.Column('is_free', sa.Boolean(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # Association table: which user may call which api on which instance.
    op.create_table('authorization',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('instance_id', sa.Integer(), nullable=False),
    sa.Column('api_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['api_id'], ['api.id'], ),
    sa.ForeignKeyConstraint(['instance_id'], ['instance.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('user_id', 'instance_id', 'api_id')
    )
    op.create_table('job',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('task_uuid', sa.Text(), nullable=True),
    sa.Column('filename', sa.Text(), nullable=True),
    sa.Column('type', sa.Text(), nullable=True),
    sa.Column('instance_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['instance_id'], ['instance.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Authentication tokens, optionally expiring (valid_until nullable).
    op.create_table('key',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('token', sa.Text(), nullable=False),
    sa.Column('valid_until', sa.Date(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('token')
    )
    # Seed row: the 'ALL' api used as the default authorization target.
    api = table('api', column('name', sa.String))
    op.bulk_insert(api, [{'name': 'ALL'}])
    ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade().

    Child tables are dropped first so foreign-key constraints never
    block a drop.
    """
    for table_name in ('key', 'job', 'authorization', 'instance', 'user', 'api'):
        op.drop_table(table_name)
| agpl-3.0 |
django-nonrel/django-nonrel | tests/regressiontests/views/tests/defaults.py | 50 | 3720 | from os import path
from django.conf import settings
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from regressiontests.views.models import Author, Article, UrlArticle
class DefaultsTests(TestCase):
    """Test django views in django/views/defaults.py"""
    fixtures = ['testdata.json']
    # One URL that is routed to a 404 on purpose and one that is not routed
    # at all; both must produce the page_not_found view.
    non_existing_urls = ['/views/non_existing_url/', # this is in urls.py
                         '/views/other_non_existing_url/'] # this NOT in urls.py
    def test_shortcut_with_absolute_url(self):
        "Can view a shortcut for an Author object that has a get_absolute_url method"
        for obj in Author.objects.all():
            short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
            response = self.client.get(short_url)
            self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
                                 status_code=302, target_status_code=404)
    def test_shortcut_no_absolute_url(self):
        "Shortcuts for an object that has no get_absolute_url method raises 404"
        for obj in Article.objects.all():
            short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
            response = self.client.get(short_url)
            self.assertEqual(response.status_code, 404)
    def test_wrong_type_pk(self):
        "A pk that is not parseable as the model's pk type returns 404"
        short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_shortcut_bad_pk(self):
        "A pk with no matching object returns 404"
        short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_nonint_content_type(self):
        "A non-integer content-type id in the shortcut URL returns 404"
        an_author = Author.objects.all()[0]
        short_url = '/views/shortcut/%s/%s/' % ('spam', an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_bad_content_type(self):
        "An unknown content-type id in the shortcut URL returns 404"
        an_author = Author.objects.all()[0]
        short_url = '/views/shortcut/%s/%s/' % (42424242, an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_page_not_found(self):
        "A 404 status is returned by the page_not_found view"
        for url in self.non_existing_urls:
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)
    def test_csrf_token_in_404(self):
        """
        The 404 page should have the csrf_token available in the context
        """
        # See ticket #14565
        for url in self.non_existing_urls:
            response = self.client.get(url)
            csrf_token = response.context['csrf_token']
            self.assertNotEqual(str(csrf_token), 'NOTPROVIDED')
            self.assertNotEqual(str(csrf_token), '')
    def test_server_error(self):
        "The server_error view raises a 500 status"
        response = self.client.get('/views/server_error/')
        self.assertEqual(response.status_code, 500)
    def test_get_absolute_url_attributes(self):
        "A model can set attributes on the get_absolute_url method"
        self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
        article = UrlArticle.objects.get(pk=1)
        self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
| bsd-3-clause |
arthurmco/ghostwriter | ghostwriter/__init__.py | 1 | 12587 | # GhostWriter main file
# Copyright (C) 2017 Arthur M
#
from flask import Flask, request, jsonify, render_template, flash, redirect, url_for
from flask_login import LoginManager, login_user, logout_user, current_user, login_required
from ghostwriter.models.modelmanager import ModelManager
# Application object; configuration is read from the file pointed to by the
# GHOSTWRITER_CONFIG environment variable.
app = Flask("ghostwriter");
app.config.from_envvar('GHOSTWRITER_CONFIG')
# Database layer: bind the model manager to the configured database URI.
mm = ModelManager(app)
mm.setDatabaseURI(app.config['GHOSTWRITER_DATABASE'])
mm.init()
# Flask-Login session handling (see load_user below).
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(session_id):
    """Flask-Login user-loader callback.

    ``session_id`` is the value stored in the session cookie, expected to
    be formatted as ``"<uid>|<token>"``.  Returns the matching logged-in
    user, or None when the cookie is malformed or no session matches
    (returning None makes Flask-Login treat the request as anonymous).
    """
    from ghostwriter.UserManager import UserManager, get_user_manager
    um = get_user_manager()
    session_parts = str(session_id).split('|')
    # FIX: a malformed cookie used to raise IndexError/ValueError here;
    # reject it instead of crashing the request.
    if len(session_parts) != 2:
        return None
    try:
        uid = int(session_parts[0])
    except ValueError:
        return None
    us = um.getLoggedUsersbyToken(session_parts[1])
    if not us:
        return None
    for u in us:
        if u.uid == uid:
            return u
    return None
# Test route
@app.route('/')
def test_run():
    """Install-check landing page; returns a static HTML banner."""
    return """Ghostwriter 0.0.1 installed successfully<br/>
    Please disable this route in the setup (not existent yet) """
def post_array_to_dictionary(posts):
    """Serialize a list of Post objects into JSON-ready dictionaries.

    The post content is intentionally omitted; only metadata (id, title,
    creation date, summary and owner) is exposed, matching what the list
    endpoints return.

    :param posts: iterable of Post instances
    :return: list of dicts, one per post
    """
    # The previous version imported ghostwriter.Post without using it and
    # built the list with a manual append loop; both cleaned up here.
    return [{
        'id': post.ID,
        'title': post.title,
        'creation_date': post.creation_date.isoformat(),
        'summary': post.getSummary(),
        'owner': {
            'id': post.getOwner().uid,
            'name': post.getOwner().name
        }
    } for post in posts]
# REST interfaces
@app.route('/api/posts/', methods=['GET'])
def posts_get():
    """List every post as JSON metadata (no content); 404 when empty."""
    from ghostwriter.Post import Post, PostManager
    manager = PostManager()
    all_posts = manager.getAllPosts()
    if len(all_posts) <= 0:
        return jsonify({'error': 'No posts found'}), 404
    return jsonify(post_array_to_dictionary(all_posts))
@app.route('/api/post/search', methods=['GET'])
def posts_search(userid = None):
    """Search posts by title and/or creation date.

    Query-string parameters:
        title: filter by post title
        cdate: filter by creation date

    Returns the matching posts as JSON; 404 with an error payload when no
    filter was supplied or nothing matched.  (``userid`` is unused; kept
    for backward compatibility of the view signature.)
    """
    vsearch = {}
    title = request.args.get('title')
    if title is not None:
        vsearch['title'] = title
    cdate = request.args.get('cdate')
    if cdate is not None:
        vsearch['creation_date'] = cdate
    from ghostwriter.Post import Post, PostManager
    pm = PostManager()
    posts = pm.filterPosts(**vsearch)
    if posts is None:
        # BUG FIX: this was jsonify({'error', '...'}) -- a *set* literal,
        # which jsonify cannot serialize.  Now a proper dict.
        return jsonify({'error': 'No filter specified'}), 404
    if len(posts) <= 0:
        return jsonify({'error': 'No posts found'}), 404
    return jsonify(post_array_to_dictionary(posts))
@app.route('/api/post/<int:id>/content', methods=['GET', 'PUT'])
def post_get_content(id):
    """
    Retrieves/sets post content
    Returns post data in its native XHTML format, or 404 if post not found
    GET: Retrieve the content
    PUT: Updates content
    """
    from ghostwriter.Post import Post, PostManager
    pm = PostManager()
    post = pm.getPostbyID(id)
    if post is None:
        return '',404
    if request.method == 'GET':
        # Reading content is public; no login required.
        return post.getContent()
    if request.method == 'PUT':
        # Auth is checked manually (instead of @login_required) so that the
        # GET branch above stays accessible anonymously.
        if not current_user.is_authenticated:
            return login_manager.unauthorized()
        post.setContent(request.form['content'])
        pm.updatePostContent(post)
        return '',200
@app.route('/api/post/<int:id>/', methods=['GET', 'DELETE', 'PUT'])
def post_get(id):
    """Manage a single post.

    GET: return post metadata as JSON.
    PUT: update the post title (``title`` form field) and return the new
         metadata.
    DELETE: delete the post; only its owner may do so (401 otherwise).

    Returns 404 when the post does not exist.
    """
    from ghostwriter.Post import Post, PostManager
    pm = PostManager()
    post = pm.getPostbyID(id)
    if post is None:
        return jsonify(
            {'error': 'The post could not be found'}), 404

    def _metadata(p):
        # Single place to build the metadata payload; this dict used to be
        # duplicated verbatim in the GET and PUT branches.
        return {'id': p.ID,
                'title': p.title,
                'creation_date': p.creation_date.isoformat(),
                'summary': p.getSummary(),
                'owner': {
                    'id': p.getOwner().uid,
                    'name': p.getOwner().name
                }
                }

    if request.method == 'GET':
        return jsonify(_metadata(post)), 200
    elif request.method == 'DELETE':
        # TODO: check permissions (roles) instead of owner-only.
        if current_user.uid != post.getOwner().uid:
            # FIX: message previously read "...delete the pos".
            return jsonify({'error': 'Not authorized to delete the post'}), 401
        pm.removePost(post)
        return "", 200
    elif request.method == 'PUT':
        post.title = request.form['title']
        pm.updatePostMetadata(post)
        return jsonify(_metadata(post)), 200
    else:
        return "", 405
@app.route('/api/post/create/', methods=['POST'])
@login_required
def post_create():
    """Create a new post owned by the logged-in user.

    Expects a ``title`` form field; returns the metadata of the newly
    created post (including its id) as JSON.
    """
    from ghostwriter.Post import Post, PostManager
    manager = PostManager()
    new_post = Post(current_user.uid, request.form['title'])
    manager.addPost(new_post)
    payload = {
        'id': new_post.ID,
        'title': new_post.title,
        'creation_date': new_post.creation_date.isoformat(),
        'summary': new_post.getSummary(),
        'owner': {
            'id': new_post.getOwner().uid,
            'name': new_post.getOwner().name
        }
    }
    return jsonify(payload), 200
@app.route('/api/users/', methods=['GET', 'POST'])
@login_required
def user_list_manage():
    """Manage the user collection.

    GET: list every user as JSON (404 when there are none).
    POST: create a user from the ``username``, ``password`` and optional
          ``name`` form fields; returns the new user as JSON.
    """
    from ghostwriter.User import User
    from ghostwriter.UserManager import UserManager
    um = UserManager()
    if request.method == 'GET':
        userlist = um.getAllUsers()
        if len(userlist) <= 0:
            return jsonify({'error': 'No users'}), 404
        juser = [{'id': u.uid, 'username': u.username, 'name': u.name}
                 for u in userlist]
        return jsonify(juser), 200
    elif request.method == 'POST':
        login = request.form['username']
        password = request.form['password']
        # The display name defaults to the login when not supplied.
        try:
            name = request.form['name']
        except KeyError:
            name = login
        user = User(login, name)
        um.addUser(user, password)
        return jsonify({'id': user.uid,
                        'username': user.username,
                        'name': user.name}), 200
    else:
        return "", 405
@app.route('/api/user/<int:userid>/', methods=['GET','DELETE', 'PUT'])
@login_required
def user_manage(userid):
    """
    Manages an individual user
    GET: Gets information from user with id 'userid'
    DELETE: Delete said user
    PUT: Update user information, unless password
    Returns 404 Not Found if user not found, or 403 Forbidden
    if trying to delete a user you are logged in
    """
    from ghostwriter.User import User
    from ghostwriter.UserManager import UserManager
    um = UserManager()
    u = um.getUserbyID(userid)
    if u is None:
        return jsonify({'error': 'User not found'}), 404
    if request.method == 'GET':
        jdata = {'id': u.uid,
                 'username': u.username,
                 'name': u.name}
        return jsonify(jdata), 200
    elif request.method == "PUT":
        u.username = request.form['username']
        u.name = request.form['name']
        # Password is only changed when 'old_password' is supplied AND
        # verifies against the stored one.
        # NOTE(review): a wrong old_password is silently ignored -- the
        # request still returns 200 with the password unchanged.  Confirm
        # whether the API should report this as an error instead.
        if 'old_password' in request.form:
            if u.checkPassword(request.form['old_password'], um):
                if 'password' in request.form:
                    um.updatePassword(u, request.form['password'])
        um.updateUser(u)
        jdata = {'id': u.uid,
                 'username': u.username,
                 'name': u.name}
        return jsonify(jdata), 200
    elif request.method == 'DELETE':
        # Refuse self-deletion: the session would be left dangling.
        if current_user.uid == u.uid:
            return jsonify({'error':
                            'Cannot delete user you are logged in'}), 403
        um.removeUser(u)
        return "",200
    else:
        return "",405
@app.route('/api/user/<int:userid>/posts', methods=['GET'])
def posts_author_search(userid):
    """Return every post written by the given author as JSON (404 if none)."""
    from ghostwriter.Post import Post, PostManager
    manager = PostManager()
    found = manager.filterPosts(author_id=userid)
    if len(found) <= 0:
        return jsonify({'error': 'No posts found'}), 404
    return jsonify(post_array_to_dictionary(found))
@app.route('/api/user/<int:userid>/posts/search', methods=['GET'])
def posts_author_search_filter(userid):
    """Search the given author's posts, optionally filtered by title
    and/or creation date query parameters."""
    criteria = {'author_id': userid}
    title = request.args.get('title')
    if title is not None:
        criteria['title'] = title
    cdate = request.args.get('cdate')
    if cdate is not None:
        criteria['creation_date'] = cdate
    from ghostwriter.Post import Post, PostManager
    manager = PostManager()
    found = manager.filterPosts(**criteria)
    if len(found) <= 0:
        return jsonify({'error': 'No posts found'}), 404
    return jsonify(post_array_to_dictionary(found))
# Admin interface
@app.route('/admin')
def show_admin_panel():
    """Render the admin login page (the only admin route without login)."""
    return render_template('admin.html')
@app.route('/admin/login', methods=['POST'])
def do_login():
    """Authenticate an admin user from the login form.

    On success the user is logged in and redirected to the admin panel;
    on failure the login page is re-rendered with a 401 status and a
    flashed error message.
    """
    username = request.form['username']
    password = request.form['password']
    from ghostwriter.User import User
    from ghostwriter.UserManager import UserManager, get_user_manager
    um = get_user_manager()
    u = um.getUserbyUsername(username)
    # SECURITY FIX: use one message for both "unknown user" and "wrong
    # password" so the form cannot be used to enumerate valid usernames
    # (it previously flashed 'User does not exist' for the former).
    if u is None or not u.login(password, um):
        flash('Invalid user or password')
        return render_template('admin.html'), 401
    login_user(u)
    return redirect(url_for('show_main_admin'))
@app.route('/admin/panel', methods=['GET'])
@login_required
def show_main_admin():
    """Render the admin dashboard."""
    return render_template('main.html', navlink='dashboard')
@app.route('/admin/users', methods=['GET'])
@login_required
def show_users():
    """Render the user-list admin page."""
    return render_template('users.html', navlink='users')
@app.route('/admin/users/create', methods=['GET'])
@login_required
def admin_create_user():
    """Render the user form in creation mode."""
    return render_template('manage_user.html', navlink='users', action='create')
@app.route('/admin/users/edit/<int:id>/', methods=['GET'])
@login_required
def admin_edit_user(id):
    """Render the user form in edit mode for the given user id."""
    return render_template('manage_user.html', navlink='users', action='edit',
                           userid=id)
@app.route('/admin/posts', methods=['GET'])
@login_required
def show_posts():
    """Render the post-list admin page."""
    return render_template('posts.html', navlink='posts')
@app.route('/admin/posts/create', methods=['GET'])
@login_required
def admin_create_post():
    """Render the post form in creation mode."""
    return render_template('manage_post.html', navlink='posts', action='create')
@app.route('/admin/posts/edit/<int:id>/', methods=['GET'])
@login_required
def edit_post(id):
    """Render the post form in edit mode for the given post id."""
    return render_template('manage_post.html', navlink='posts',
                           action='edit', postid=id)
@app.route('/admin/logoff', methods=['GET'])
@login_required
def do_logout():
    """Log the current user out and return to the login page."""
    logout_user()
    return redirect(url_for('show_admin_panel'))
# Commands
@app.cli.command()
def initdb():
    """ Initialise the database """
    print('Creating database')
    try:
        mm.create()
        from ghostwriter.User import User
        from ghostwriter.UserManager import UserManager
        um = UserManager()
        # Default account admin/admin -- NOTE(review): deployments should be
        # forced to change this password immediately.
        um.addUser(User('admin', 'Administrator'), 'admin')
        print('Database created')
    except Exception as e:
        # Broad catch on purpose: this is a CLI command, so report the
        # failure to the console and the app log instead of crashing.
        print('Error')
        app.logger.error('Error while creating database: {}'.format(e))
# SECURITY NOTE(review): the session secret is hard-coded in the source and
# thus shared by every deployment; it should be loaded from the
# GHOSTWRITER_CONFIG configuration instead.
app.secret_key = 'B1Ad99013yX R~XHHHHHHHHHH/,?RT'
| mit |
nvazquez/Turtlebots | plugins/xevents/Xlib/error.py | 2 | 4823 | # $Id: error.py,v 1.6 2007/06/10 14:11:58 mggrant Exp $
#
# Xlib.error -- basic error classes
#
# Copyright (C) 2000 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Standard modules
import string
# Xlib modules
import X
# Xlib.protocol modules
from Xlib.protocol import rq
class DisplayError(Exception):
    """Base class for errors tied to a particular display."""

    def __init__(self, display):
        self.display = display

    def __str__(self):
        return 'Display error "%s"' % (self.display, )
class DisplayNameError(DisplayError):
    """Raised when a display name cannot be parsed."""

    def __str__(self):
        return 'Bad display name "%s"' % (self.display, )
class DisplayConnectionError(DisplayError):
    """Raised when the connection to an X display cannot be established."""

    def __init__(self, display, msg):
        DisplayError.__init__(self, display)
        self.msg = msg

    def __str__(self):
        return 'Can\'t connect to display "%s": %s' % (self.display, self.msg)
class ConnectionClosedError(Exception):
    """Raised when the display connection has been closed by a peer."""

    def __init__(self, whom):
        self.whom = whom

    def __str__(self):
        return 'Display connection closed by %s' % (self.whom, )
# Marker exceptions: they add no behaviour over Exception, only a distinct
# type for callers to catch.
class XauthError(Exception): pass
class XNoAuthError(Exception): pass
class ResourceIDError(Exception): pass
class XError(rq.GetAttrData, Exception):
    """Base class for X protocol errors reported by the server.

    The 32-byte wire packet is parsed via the rq.Struct below; the parsed
    fields become readable attributes through rq.GetAttrData.
    """
    _fields = rq.Struct( rq.Card8('type'), # Always 0
                         rq.Card8('code'),
                         rq.Card16('sequence_number'),
                         rq.Card32('resource_id'),
                         rq.Card16('minor_opcode'),
                         rq.Card8('major_opcode'),
                         rq.Pad(21)
                         )
    def __init__(self, display, data):
        # rawdict=1 keeps the parsed fields as a plain dict in _data.
        self._data, data = self._fields.parse_binary(data, display, rawdict = 1)
    def __str__(self):
        s = []
        for f in ('code', 'resource_id', 'sequence_number',
                  'major_opcode', 'minor_opcode'):
            s.append('%s = %s' % (f, self._data[f]))
        # string.join: this module targets Python 2.
        return '%s: %s' % (self.__class__, string.join(s, ', '))
class XResourceError(XError):
    """XError variant whose payload carries a resource id.

    Identical layout to XError except that 'resource_id' is parsed as an
    rq.Resource instead of a plain Card32.
    """
    _fields = rq.Struct( rq.Card8('type'), # Always 0
                         rq.Card8('code'),
                         rq.Card16('sequence_number'),
                         rq.Resource('resource_id'),
                         rq.Card16('minor_opcode'),
                         rq.Card8('major_opcode'),
                         rq.Pad(21)
                         )
# One exception class per core X protocol error code (mapped from the
# numeric codes in xerror_class below).  Errors that identify an offending
# resource derive from XResourceError; the rest from plain XError.
class BadRequest(XError): pass
class BadValue(XError): pass
class BadWindow(XResourceError): pass
class BadPixmap(XResourceError): pass
class BadAtom(XError): pass
class BadCursor(XResourceError): pass
class BadFont(XResourceError): pass
class BadMatch(XError): pass
class BadDrawable(XResourceError): pass
class BadAccess(XError): pass
class BadAlloc(XError): pass
class BadColor(XResourceError): pass
class BadGC(XResourceError): pass
class BadIDChoice(XResourceError): pass
class BadName(XError): pass
class BadLength(XError): pass
class BadImplementation(XError): pass
# Maps the numeric error code received on the wire (constants from module
# X) to the exception class that should be instantiated for it.
xerror_class = {
    X.BadRequest: BadRequest,
    X.BadValue: BadValue,
    X.BadWindow: BadWindow,
    X.BadPixmap: BadPixmap,
    X.BadAtom: BadAtom,
    X.BadCursor: BadCursor,
    X.BadFont: BadFont,
    X.BadMatch: BadMatch,
    X.BadDrawable: BadDrawable,
    X.BadAccess: BadAccess,
    X.BadAlloc: BadAlloc,
    X.BadColor: BadColor,
    X.BadGC: BadGC,
    X.BadIDChoice: BadIDChoice,
    X.BadName: BadName,
    X.BadLength: BadLength,
    X.BadImplementation: BadImplementation,
    }
class CatchError:
    """Callable error handler that records matching errors.

    Construct with zero or more error classes; with none given, every
    error matches.  Calling the instance with (error, request) returns 1
    and stores the pair when the error matches, 0 otherwise.
    """

    def __init__(self, *errors):
        self.error_types = errors
        self.error = None
        self.request = None

    def __call__(self, error, request):
        if not self.error_types:
            # No filter installed: accept and record anything.
            self.error = error
            self.request = request
            return 1
        for etype in self.error_types:
            if isinstance(error, etype):
                self.error = error
                self.request = request
                return 1
        return 0

    def get_error(self):
        return self.error

    def get_request(self):
        return self.request

    def reset(self):
        self.error = None
        self.request = None
| mit |
ntuecon/server | pyenv/Lib/site-packages/twisted/python/roots.py | 14 | 7404 | # -*- test-case-name: twisted.test.test_roots -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Python Roots: an abstract hierarchy representation for Twisted.
Maintainer: Glyph Lefkowitz
"""
from __future__ import absolute_import, division
from twisted.python import reflect
from twisted.python._oldstyle import _oldStyle
# Subclasses NotImplementedError so generic "not implemented" handlers
# still catch it.
class NotSupportedError(NotImplementedError):
    """
    An exception meaning that the tree-manipulation operation
    you're attempting to perform is not supported.
    """
@_oldStyle
class Request:
    """I am an abstract representation of a request for an entity.
    I also function as the response. The request is responded to by calling
    self.write(data) until there is no data left and then calling
    self.finish().
    """
    # This attribute should be set to the string name of the protocol being
    # responded to (e.g. HTTP or FTP)
    wireProtocol = None
    def write(self, data):
        """Add some data to the response to this request.
        """
        # Abstract: subclasses must override; the message names the
        # concrete class that failed to do so.
        raise NotImplementedError("%s.write" % reflect.qual(self.__class__))
    def finish(self):
        """The response to this request is finished; flush all data to the network stream.
        """
        raise NotImplementedError("%s.finish" % reflect.qual(self.__class__))
@_oldStyle
class Entity:
    """I am a terminal object in a hierarchy, with no children.
    I represent a null interface; certain non-instance objects (strings and
    integers, notably) are Entities.
    Methods on this class are suggested to be implemented, but are not
    required, and will be emulated on a per-protocol basis for types which do
    not handle them.
    """
    def render(self, request):
        """
        I produce a stream of bytes for the request, by calling request.write()
        and request.finish().
        """
        # Abstract: subclasses must override.
        raise NotImplementedError("%s.render" % reflect.qual(self.__class__))
@_oldStyle
class Collection:
    """I represent a static collection of entities.
    I contain methods designed to represent collections that can be dynamically
    created.
    """
    def __init__(self, entities=None):
        """Initialize me.
        """
        # A fresh dict is created per instance; a mutable default argument
        # would be shared between instances.
        if entities is not None:
            self.entities = entities
        else:
            self.entities = {}
    def getStaticEntity(self, name):
        """Get an entity that was added to me using putEntity.
        This method will return 'None' if it fails.
        """
        return self.entities.get(name)
    def getDynamicEntity(self, name, request):
        """Subclass this to generate an entity on demand.
        This method should return 'None' if it fails.
        """
        # Intentionally empty: the base implementation returns None.
    def getEntity(self, name, request):
        """Retrieve an entity from me.
        I will first attempt to retrieve an entity statically; static entities
        will obscure dynamic ones. If that fails, I will retrieve the entity
        dynamically.
        If I cannot retrieve an entity, I will return 'None'.
        """
        ent = self.getStaticEntity(name)
        if ent is not None:
            return ent
        ent = self.getDynamicEntity(name, request)
        if ent is not None:
            return ent
        return None
    def putEntity(self, name, entity):
        """Store a static reference on 'name' for 'entity'.
        Raises a KeyError if the operation fails.
        """
        self.entities[name] = entity
    def delEntity(self, name):
        """Remove a static reference for 'name'.
        Raises a KeyError if the operation fails.
        """
        del self.entities[name]
    def storeEntity(self, name, request):
        """Store an entity for 'name', based on the content of 'request'.
        """
        # Not supported on static collections; subclasses may override.
        raise NotSupportedError("%s.storeEntity" % reflect.qual(self.__class__))
    def removeEntity(self, name, request):
        """Remove an entity for 'name', based on the content of 'request'.
        """
        raise NotSupportedError("%s.removeEntity" % reflect.qual(self.__class__))
    def listStaticEntities(self):
        """Retrieve a list of all name, entity pairs that I store references to.
        See getStaticEntity.
        """
        return self.entities.items()
    def listDynamicEntities(self, request):
        """A list of all name, entity that I can generate on demand.
        See getDynamicEntity.
        """
        return []
    def listEntities(self, request):
        """Retrieve a list of all name, entity pairs I contain.
        See getEntity.
        """
        return self.listStaticEntities() + self.listDynamicEntities(request)
    def listStaticNames(self):
        """Retrieve a list of the names of entities that I store references to.
        See getStaticEntity.
        """
        return self.entities.keys()
    def listDynamicNames(self):
        """Retrieve a list of the names of entities that I store references to.
        See getDynamicEntity.
        """
        return []
    def listNames(self, request):
        """Retrieve a list of all names for entities that I contain.
        See getEntity.
        """
        # NOTE(review): only static names are returned here, even though
        # getEntity also consults dynamic entities -- confirm intent.
        return self.listStaticNames()
# Raised by Constrained subclasses when a name or entity is rejected.
class ConstraintViolation(Exception):
    """An exception raised when a constraint is violated.
    """
class Constrained(Collection):
    """A collection whose names and/or entities must satisfy constraints."""

    def nameConstraint(self, name):
        """Decide whether an entity may be stored under 'name'.

        Return 1 to accept; return 0 or raise a descriptive
        ConstraintViolation to reject.
        """
        return 1

    def entityConstraint(self, entity):
        """Decide whether 'entity' itself may be stored.

        Return 1 to accept; return 0 or raise a descriptive
        ConstraintViolation to reject.
        """
        return 1

    def reallyPutEntity(self, name, entity):
        """Store the entity unconditionally, bypassing the constraints."""
        Collection.putEntity(self, name, entity)

    def putEntity(self, name, entity):
        """Store the entity if both constraints accept it.

        Raises ConstraintViolation otherwise.  The name constraint is
        checked first, then the entity constraint.
        """
        if not self.nameConstraint(name):
            raise ConstraintViolation("Name constraint violated.")
        if not self.entityConstraint(entity):
            raise ConstraintViolation("Entity constraint violated.")
        self.reallyPutEntity(name, entity)
class Locked(Constrained):
    """A collection that can be locked from adding entities."""
    # 0/1 flag; class-level default means instances start unlocked.
    locked = 0
    def lock(self):
        # One-way: no unlock method is provided.
        self.locked = 1
    def entityConstraint(self, entity):
        return not self.locked
class Homogenous(Constrained):
    """A homogenous collection of entities.

    Only entities that are instances of the class or type named by my
    'entityType' attribute may be stored; everything else raises
    ConstraintViolation.
    """
    entityType = object

    def entityConstraint(self, entity):
        if not isinstance(entity, self.entityType):
            raise ConstraintViolation("%s of incorrect type (%s)" %
                                      (entity, self.entityType))
        return 1

    def getNameType(self):
        return "Name"

    def getEntityType(self):
        return self.entityType.__name__
| bsd-3-clause |
rubencabrera/odoo | addons/email_template/wizard/email_template_preview.py | 377 | 3851 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
# Transient (wizard) model: inherits the fields of email.template and adds
# the preview-only columns declared in _columns below.
class email_template_preview(osv.osv_memory):
    _inherit = "email.template"
    _name = "email_template.preview"
    _description = "Email Template Preview"
def _get_records(self, cr, uid, context=None):
"""
Return Records of particular Email Template's Model
"""
if context is None:
context = {}
template_id = context.get('template_id', False)
if not template_id:
return []
email_template = self.pool.get('email.template')
template = email_template.browse(cr, uid, int(template_id), context=context)
template_object = template.model_id
model = self.pool[template_object.model]
record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
default_id = context.get('default_res_id')
if default_id and default_id not in record_ids:
record_ids.insert(0, default_id)
return model.name_get(cr, uid, record_ids, context)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
if 'res_id' in fields and not result.get('res_id'):
records = self._get_records(cr, uid, context=context)
result['res_id'] = records and records[0][0] or False # select first record as a Default
if template_id and 'model_id' in fields and not result.get('model_id'):
result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
return result
_columns = {
'res_id': fields.selection(_get_records, 'Sample Document'),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
}
def on_change_res_id(self, cr, uid, ids, res_id, context=None):
if context is None:
context = {'value': {}}
if not res_id or not context.get('template_id'):
return {'value': {}}
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
template = email_template.browse(cr, uid, template_id, context=context)
# generate and get template values
mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to', 'partner_ids', 'attachment_ids'))
vals['name'] = template.name
return {'value': vals}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/test/sample_doctest.py | 203 | 1041 | """This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
# NOTE: the docstrings below are doctest fixtures; their exact pass/fail
# counts are asserted elsewhere, so the string contents must not change.
def foo():
    # Mixed fixture: first example fails on purpose (2+2 != 5), second passes.
    """
    >>> 2+2
    5
    >>> 2+2
    4
    """
def bar():
    # Passing doctest fixture.
    """
    >>> 2+2
    4
    """
def test_silly_setup():
    # Passes only when the harness has injected test.test_doctest.sillySetup
    # via a setUp hook before running this doctest.
    """
    >>> import test.test_doctest
    >>> test.test_doctest.sillySetup
    True
    """
def w_blank():
    # Exercises the <BLANKLINE> marker for empty lines in expected output.
    """
    >>> if 1:
    ...    print('a')
    ...    print()
    ...    print('b')
    a
    <BLANKLINE>
    b
    """
# Module-level name referenced by the x_is_one doctest below.
x = 1
def x_is_one():
    # Passing fixture: x is defined in this module's globals.
    """
    >>> x
    1
    """
def y_is_one():
    # Failing fixture by design: y is deliberately never defined, so this
    # doctest raises NameError.
    """
    >>> y
    1
    """
__test__ = {'good': """
>>> 42
42
""",
'bad': """
>>> 42
666
""",
}
def test_suite():
    # Collect every doctest in this module (including the __test__ entries)
    # into a unittest-compatible suite.
    import doctest
    return doctest.DocTestSuite()
| gpl-2.0 |
class Point():
    """A particle on a square grid with an integer position and velocity."""

    def __init__(self, y_pos=0, x_pos=0, y_velo=0, x_velo=0):
        self.x_pos = x_pos
        self.y_pos = y_pos
        self.x_velo = x_velo
        self.y_velo = y_velo

    def move(self):
        """Advance one step along the current velocity vector."""
        self.x_pos, self.y_pos = (self.x_pos + self.x_velo,
                                  self.y_pos + self.y_velo)

    def bounce(self, grid_size):
        """Reflect off the grid walls, stepping back inside after flipping."""
        outside_x = self.x_pos < 0 or self.x_pos > grid_size - 1
        if outside_x:
            self.x_velo = -self.x_velo
            self.x_pos += self.x_velo
        outside_y = self.y_pos < 0 or self.y_pos > grid_size - 1
        if outside_y:
            self.y_velo = -self.y_velo
            self.y_pos += self.y_velo
class Grid():
    """A square grid that renders and advances a set of bouncing points."""

    def __init__(self, point_list=None, grid_size=5):
        self.grid = [[0] * grid_size for _ in range(grid_size)]
        self.grid_size = grid_size
        # Keep the caller's list object when one is supplied.
        self.points = [] if point_list is None else point_list

    def step(self):
        """Print one frame to stdout, then move every point one tick."""
        # Mark the cells currently occupied by points.
        for p in self.points:
            self.grid[p.y_pos][p.x_pos] = 1
        # Render the frame: one row per line, then a blank separator line.
        for row in self.grid:
            print(row)
        print()
        # Clear the marks so the grid is clean for the next frame.
        for p in self.points:
            self.grid[p.y_pos][p.x_pos] = 0
        # Advance and reflect each point off the walls.
        for p in self.points:
            p.move()
            p.bounce(self.grid_size)
# Demo: four points starting in opposite corners of a 5x5 grid, two moving
# diagonally and two horizontally.
GRID = Grid([Point(0, 0, 1, 1),
             Point(0, 0, 0, 1),
             Point(4, 4, -1, -1),
             Point(4, 4, 0, -1)],
            grid_size=5)
# Render five animation frames to stdout.
for i in range(5):
    GRID.step()
| mit |
jerbob92/CouchPotatoServer | libs/requests/packages/charade/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
# base_str groups the "stringy" types for isinstance() checks: on Python 2
# both byte and unicode strings, on Python 3 bytes and str. The py2 branch
# references the builtin `unicode`, which only exists there.
if sys.version_info < (3, 0):
    base_str = (str, unicode)
else:
    base_str = (bytes, str)
def wrap_ord(a):
    """Return ord(a) for Python 2 string input; otherwise pass a through.

    On Python 3, iterating a bytes object already yields ints, so the
    value is returned unchanged (the version check also short-circuits
    before base_str is ever evaluated).
    """
    needs_ord = sys.version_info < (3, 0) and isinstance(a, base_str)
    return ord(a) if needs_ord else a
| gpl-3.0 |
gohin/django | django/contrib/auth/context_processors.py | 514 | 1938 | # PermWrapper and PermLookupDict proxy the permissions system into objects that
# the template system can understand.
class PermLookupDict(object):
    """Dict-like view over a single app's permissions for a given user."""

    def __init__(self, user, app_label):
        self.user = user
        self.app_label = app_label

    def __repr__(self):
        return str(self.user.get_all_permissions())

    def __getitem__(self, perm_name):
        # Qualify the bare permission name with this dict's app label.
        full_perm = "%s.%s" % (self.app_label, perm_name)
        return self.user.has_perm(full_perm)

    def __iter__(self):
        # Deliberately non-iterable: otherwise 'item in perms.someapp' would
        # fall back to __getitem__-based iteration (see #18979).
        raise TypeError("PermLookupDict is not iterable.")

    def __bool__(self):
        return self.user.has_module_perms(self.app_label)

    def __nonzero__(self):  # Python 2 compatibility
        # Dispatch through the class so subclass __bool__ overrides apply.
        return type(self).__bool__(self)
class PermWrapper(object):
    """Template-friendly proxy exposing permissions as perms[app][perm]."""

    def __init__(self, user):
        self.user = user

    def __getitem__(self, app_label):
        return PermLookupDict(self.user, app_label)

    def __iter__(self):
        # I am large, I contain multitudes (and refuse to enumerate them).
        raise TypeError("PermWrapper is not iterable.")

    def __contains__(self, perm_name):
        """
        Lookup by "someapp" or "someapp.someperm" in perms.
        """
        if '.' not in perm_name:
            # Bare app label: truthiness tests module-level permissions.
            return bool(self[perm_name])
        app_label, perm = perm_name.split('.', 1)
        return self[app_label][perm]
def auth(request):
    """
    Returns context variables required by apps that use Django's
    authentication system.

    If there is no 'user' attribute in the request, uses AnonymousUser (from
    django.contrib.auth). Note that an explicitly set request.user of None is
    passed through unchanged.
    """
    if not hasattr(request, 'user'):
        from django.contrib.auth.models import AnonymousUser
        user = AnonymousUser()
    else:
        user = request.user
    return {
        'user': user,
        'perms': PermWrapper(user),
    }
| bsd-3-clause |
kartoza/stream_feature_extractor | third_party/raven/transport/threaded.py | 25 | 2911 | """
raven.transport.threaded
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import atexit
import time
import threading
import os
from Queue import Queue
from raven.transport.base import HTTPTransport
DEFAULT_TIMEOUT = 10
class AsyncWorker(object):
    """Single daemon thread that drains a queue of (callback, args, kwargs).

    Used by ThreadedHTTPTransport so Sentry HTTP sends do not block the
    calling thread. Python 2 code (print statements, Queue module).
    """
    # Sentinel placed on the queue to tell the worker thread to exit.
    _terminator = object()
    def __init__(self, shutdown_timeout=DEFAULT_TIMEOUT):
        self._queue = Queue(-1)  # negative maxsize == unbounded
        self._lock = threading.Lock()
        self._thread = None
        self.options = {
            'shutdown_timeout': shutdown_timeout,
        }
        self.start()
    def main_thread_terminated(self):
        # atexit hook: give pending messages a bounded chance to flush.
        size = self._queue.qsize()
        if size:
            timeout = self.options['shutdown_timeout']
            print "Sentry is attempting to send %s pending error messages" % size
            print "Waiting up to %s seconds" % timeout
            if os.name == 'nt':
                print "Press Ctrl-Break to quit"
            else:
                print "Press Ctrl-C to quit"
            self.stop(timeout=timeout)
    def start(self):
        """
        Starts the task thread.
        """
        self._lock.acquire()
        try:
            if not self._thread:
                self._thread = threading.Thread(target=self._target)
                self._thread.setDaemon(True)
                self._thread.start()
        finally:
            self._lock.release()
        # NOTE(review): registered on every start(); repeated start/stop
        # cycles accumulate duplicate atexit hooks (harmless but redundant).
        atexit.register(self.main_thread_terminated)
    def stop(self, timeout=None):
        """
        Stops the task thread. Synchronous!
        """
        self._lock.acquire()
        try:
            if self._thread:
                self._queue.put_nowait(self._terminator)
                self._thread.join(timeout=timeout)
                self._thread = None
        finally:
            self._lock.release()
    def queue(self, callback, *args, **kwargs):
        # Enqueue callback(*args, **kwargs) for the worker thread.
        self._queue.put_nowait((callback, args, kwargs))
    def _target(self):
        # Worker loop: run jobs until the terminator sentinel is dequeued.
        while 1:
            record = self._queue.get()
            if record is self._terminator:
                break
            callback, args, kwargs = record
            callback(*args, **kwargs)
            time.sleep(0)  # yield between jobs
class ThreadedHTTPTransport(HTTPTransport):
    """HTTP transport that performs the actual send on an AsyncWorker thread."""

    scheme = ['threaded+http', 'threaded+https']

    def __init__(self, parsed_url):
        super(ThreadedHTTPTransport, self).__init__(parsed_url)
        # remove the threaded+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]

    def get_worker(self):
        # Lazily create one shared worker on first use (EAFP).
        try:
            return self._worker
        except AttributeError:
            self._worker = AsyncWorker()
            return self._worker

    def send_sync(self, data, headers):
        # Runs on the worker thread; delegates to the blocking base send.
        super(ThreadedHTTPTransport, self).send(data, headers)

    def send(self, data, headers):
        self.get_worker().queue(self.send_sync, data, headers)
| gpl-2.0 |
youdonghai/intellij-community | platform/platform-resources/src/launcher.py | 1 | 2968 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import sys
import os
import time
# see com.intellij.idea.SocketLock for the server side of this interface
RUN_PATH = u'$RUN_PATH$'
CONFIG_PATH = u'$CONFIG_PATH$'
SYSTEM_PATH = u'$SYSTEM_PATH$'
def print_usage(cmd):
    """Print the launcher's command-line help, with *cmd* as the program name."""
    print(('Usage:\n' +
           ' {0} -h | -? | --help\n' +
           ' {0} [project_dir]\n' +
           ' {0} [-l|--line line] [project_dir|--temp-project] file[:line]\n' +
           ' {0} diff <left> <right>\n' +
           ' {0} merge <local> <remote> [base] <merged>').format(cmd))
def process_args(argv):
    """Normalize raw command-line arguments for the IDE.

    'diff'/'merge' are recognized only as the first argument; '-l'/'--line'
    keep their following value verbatim; 'file:123' is rewritten to
    '-l 123 <abspath>'; every path is made absolute. '-h'/'-?'/'--help'
    prints usage and exits the process.
    """
    processed = []
    expecting_line_value = False
    for position, token in enumerate(argv[1:]):
        if token in ('-h', '-?', '--help'):
            print_usage(argv[0])
            exit(0)
        elif position == 0 and token in ('diff', 'merge'):
            processed.append(token)
        elif token in ('-l', '--line'):
            processed.append(token)
            expecting_line_value = True
        elif expecting_line_value:
            # Value for a preceding -l/--line flag: keep as-is.
            processed.append(token)
            expecting_line_value = False
        else:
            path = token
            if ':' in token:
                # Split a trailing ':<digits>' into a -l option.
                candidate_path, candidate_line = token.rsplit(':', 1)
                if candidate_line.isdigit():
                    processed.append('-l')
                    processed.append(candidate_line)
                    path = candidate_path
            processed.append(os.path.abspath(path))
    return processed
def try_activate_instance(args):
    """Try to hand *args* to an already-running IDE instance.

    Reads the port and auth token files written by the running IDE, connects
    to its socket-lock server on localhost, and — if the server announces
    ownership of this CONFIG_PATH — sends an 'activate' command carrying the
    cwd and arguments. Returns True on success, False if no compatible
    instance is reachable.

    NOTE(review): written for Python 2 — on Python 3, s.recv() returns bytes
    (never equal to the str CONFIG_PATH) and struct.pack(...) + cmd would
    raise TypeError; confirm target interpreter before reuse.
    """
    port_path = os.path.join(CONFIG_PATH, 'port')
    token_path = os.path.join(SYSTEM_PATH, 'token')
    if not (os.path.exists(port_path) and os.path.exists(token_path)):
        return False
    with open(port_path) as pf:
        port = int(pf.read())
    with open(token_path) as tf:
        token = tf.read()
    s = socket.socket()
    s.settimeout(0.3)
    try:
        s.connect(('127.0.0.1', port))
    except (socket.error, IOError):
        return False
    found = False
    while True:
        try:
            # Server streams length-prefixed ('>h') config paths it owns.
            path_len = struct.unpack('>h', s.recv(2))[0]
            path = s.recv(path_len)
            if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
                found = True
                break
        except (socket.error, IOError):
            return False
    if found:
        # NUL-separated protocol: 'activate <token>\0<cwd>\0<arg1>\0...'.
        cmd = 'activate ' + token + '\0' + os.getcwd() + '\0' + '\0'.join(args)
        encoded = struct.pack('>h', len(cmd)) + cmd
        s.send(encoded)
        time.sleep(0.5)  # don't close the socket immediately
        return True
    return False
def start_new_instance(args):
    """Replace the current process with a fresh IDE instance (never returns)."""
    if sys.platform != 'darwin':
        # basename == os.path.split(...)[1]: the executable's file name.
        executable_name = os.path.basename(RUN_PATH)
        os.execv(RUN_PATH, [executable_name] + args)
    else:
        # macOS: launch through 'open -a', forwarding args after '--args'.
        if args:
            args.insert(0, '--args')
        os.execvp('open', ['-a', RUN_PATH] + args)
# Entry point: forward the parsed arguments to a running IDE instance that
# owns this config directory, otherwise exec a fresh copy of the IDE.
ide_args = process_args(sys.argv)
if not try_activate_instance(ide_args):
    start_new_instance(ide_args)
| apache-2.0 |
prasadtalasila/IRCLogParser | lib/nickTracker.py | 1 | 6860 | import re
import lib.config as config
import lib.util as util
from datetime import date
def nick_tracker(log_dict, track_users_on_channels = False):
    """
    Tracks all nicks and identifies nicks which point to the same user.

    Scans message lines for <nick> markers and "=... is now known as ..."
    rename lines, grouping aliases of one user into the same sub-list.
    Python 2 code (print statements, dict.has_key).

    Args:
        log_dict(dictionary): key is a datetime.date object; value is a list
            of per-channel day entries shaped like
            {"log_data": [...], "auxiliary_data": {"channel": name, ...}}
        track_users_on_channels(bool): when True, additionally build
            per-day / per-channel user presence structures.

    Returns:
        [nicks, nick_same_list] or, when track_users_on_channels is True,
        [nicks, nick_same_list, channels_for_user, nick_channel_dict,
         nicks_hash, channels_hash]
    """
    nicks = [] # list of all the nicknames
    # Pre-allocated alias buckets; each bucket holds nicks of one user.
    nick_same_list = [[] for i in range(config.MAX_EXPECTED_DIFF_NICKS)]
    nick_channel_dict = []
    channels_for_user = []
    nicks_hash = []
    channels_hash = []
    #Getting all the nicknames in a list
    def nick_append(nick, nicks, nicks_today_on_this_channel, track_users_on_channels):
        # Record a nick globally and (when tracking channels) per channel-day.
        if track_users_on_channels and (nick not in nicks_today_on_this_channel):
            nicks_today_on_this_channel.append(nick) #not nicks as there are same nicks spread across multiple channels
            nicks.append(nick)
        elif nick not in nicks:
            nicks.append(nick)
        return nicks, nicks_today_on_this_channel
    for day_content_all_channels in log_dict.values():
        #traverse over data of different channels for that day
        channels_for_user_day = {}#empty for next day usage
        for day_content in day_content_all_channels:
            day_log = day_content["log_data"]
            channel_name = day_content["auxiliary_data"]["channel"]
            nicks_today_on_this_channel = []
            for i in day_log:
                # use regex to get the string between <> and appended it to the nicks list
                if(util.check_if_msg_line (i)):
                    m = re.search(r"\<(.*?)\>", i)
                    nick = util.correctLastCharCR(m.group(0)[1:-1])
                    nicks, nicks_today_on_this_channel = nick_append(nick, nicks, nicks_today_on_this_channel, track_users_on_channels)
            ''' Forming list of lists for avoiding nickname duplicacy '''
            for line in day_log:
                # '=' lines are server notices; renames look like
                # "=== old is now known as new" (topic changes excluded).
                if(line[0] == '=' and "changed the topic of" not in line):
                    old_nick = util.splice_find(line, "=", " is", 3)
                    new_nick = util.splice_find(line, "wn as", "\n", 5)
                    nicks, nicks_today_on_this_channel = nick_append(old_nick, nicks, nicks_today_on_this_channel, track_users_on_channels)
                    nicks, nicks_today_on_this_channel = nick_append(new_nick, nicks, nicks_today_on_this_channel, track_users_on_channels)
                    #nicks.append(new_nick)
                    # Merge the rename pair into an existing alias bucket, or
                    # claim the first empty bucket for them.
                    for i in range(config.MAX_EXPECTED_DIFF_NICKS):
                        if old_nick in nick_same_list[i] or new_nick in nick_same_list[i]:
                            if old_nick not in nick_same_list[i]:
                                nick_same_list[i].append(old_nick)
                            if new_nick not in nick_same_list[i]:
                                nick_same_list[i].append(new_nick)
                            break
                        if not nick_same_list[i]:
                            if old_nick not in nick_same_list[i]:
                                nick_same_list[i].append(old_nick)
                            if new_nick not in nick_same_list[i]:
                                nick_same_list[i].append(new_nick)
                            break
            if track_users_on_channels:
                '''
                Creating list of dictionaries nick_channel_dict of the format :
                [{'nickname':'rohan', 'channels':['[#abc', 0],['#bcd', 0]]},{}]
                '''
                considered_nicks = []
                if config.DEBUGGER:
                    print "Analysis on", (str(day_content["auxiliary_data"]["day"]) + "-" + str(day_content["auxiliary_data"]["month"])), channel_name
                for user in nicks_today_on_this_channel:
                    # Canonicalize to the first nick of the user's alias bucket.
                    f = 1
                    for nick_tuple in nick_same_list:
                        if user in nick_tuple:
                            user_nick = nick_tuple[0]
                            f = 0
                            break
                    if f:
                        user_nick = user
                    '''for channels of user on a day'''
                    # NOTE(review): when the channel is already recorded for
                    # this user, the else branch resets the list to just
                    # [channel_name], discarding other channels — confirm
                    # whether that is intended.
                    if channels_for_user_day.has_key(user_nick) and channel_name not in channels_for_user_day[user_nick]:
                        channels_for_user_day[user_nick].append(channel_name)
                    else:
                        channels_for_user_day[user_nick] = [channel_name]
                    flag = 1
                    for dictionary in nick_channel_dict:
                        if dictionary['nickname'] == user_nick and user_nick not in considered_nicks:
                            index = searchChannel(channel_name, dictionary['channels'])
                            if index == -1:
                                dictionary['channels'].append([channel_name,1])
                            else:
                                dictionary['channels'][index][1]+=1
                            flag = 0
                            considered_nicks.append(user_nick)
                            break
                    if flag:
                        nick_channel_dict.append({'nickname':user_nick, 'channels': [[channel_name, 1]]})
                        considered_nicks.append(user_nick)
        channels_for_user.append(channels_for_user_day)
    # Ensure every nick seen anywhere occupies some alias bucket.
    for nick in nicks:
        for index in range(config.MAX_EXPECTED_DIFF_NICKS):
            if nick in nick_same_list[index]:
                break
            if not nick_same_list[index]:
                nick_same_list[index].append(nick)
                break
    if config.DEBUGGER:
        print "========> 30 on " + str(len(nicks)) + " nicks"
        print nicks[:30]
        print "========> 30 on " + str(len(nick_same_list)) + " nick_same_list"
        print nick_same_list[:30]
    if not track_users_on_channels:
        return [nicks, nick_same_list]
    else:
        # De-duplicate nick and channel names for hashing/indexing callers.
        for dicts in nick_channel_dict:
            nick = dicts['nickname']
            if nick not in nicks_hash:
                nicks_hash.append(nick)
            for channel in dicts['channels']:
                if channel[0] not in channels_hash:
                    channels_hash.append(channel[0])
        return [nicks, nick_same_list, channels_for_user, nick_channel_dict, nicks_hash, channels_hash]
def searchChannel(channel, channel_list):
    """Return the index of *channel* in a list of [channel_name, count] pairs.

    Args:
        channel(str): channel name to look for.
        channel_list(list): list of [channel_name, count] pairs, as stored in
            nick_channel_dict entries.

    Returns:
        int: index of the first pair whose name matches, or -1 if absent.
    """
    # enumerate replaces the original hand-maintained index counter.
    for index, channel_entry in enumerate(channel_list):
        if channel_entry[0] == channel:
            return index
    return -1
| gpl-3.0 |
okanokumus/Programming | Python/IntroductionPython/binaryTOdecimal.py | 1 | 1842 | # binary to decimal and vice versa
def choice():
    """Interactive menu: ask for a direction and run the chosen conversion.

    Reads from stdin; rejects negative numbers and exits on an unknown
    menu option. (User-facing strings, including their typos, are part of
    the program's observable output and are left untouched.)
    """
    ch = input("press 1 : binary to decimal\n"
               "press 2 : decimal to binary\n"
               ": ")
    if ch == '1':
        print("binary to decimal converion")
        number = int(input("enter your number that you want to convert decimal: "))
        if number >= 0:
            binary_to_decimal(number)
        else:
            print("given number must be greater than 0")
    elif ch == '2':
        print("decimal to binary converion")
        number = int(input("enter your number that you want to convert binary: "))
        if number >= 0:
            decimal_to_binary(number)
        else:
            print("given number must be greater than 0")
    else:
        print("wrong choice")
        exit()
# decimal to binary conversion
def decimal_to_binary(number):
    """Print the binary representation of a non-negative integer.

    Output format: "[MS ... LS]:  <bits>" with the most significant bit
    first, matching binary_to_decimal's output style.

    Args:
        number(int): non-negative integer to convert.
    """
    if number == 0:
        # Fix: keep the result a string in every branch (the original bound
        # an int 0 here); the printed output is unchanged.
        binary_number = "0"
    else:
        digits = []
        while number > 0:
            # divmod yields quotient and remainder in one step.
            number, remainder = divmod(number, 2)
            digits.append(str(remainder))
        # Remainders come out least-significant first; reverse for display.
        binary_number = ''.join(reversed(digits))
    print("[MS ... LS]: ", binary_number)
# binary to decimal conversion
def binary_to_decimal(number):
    """Print the decimal value of an integer whose digits are binary.

    E.g. 1010 -> "[MS ... LS]:  10". Any digit other than 0/1 prints an
    error message and terminates the process via exit().
    """
    # Peel off the decimal digits, least significant first.
    digit_stack = []
    while number > 0:
        digit_stack.append(number % 10)
        number = int(number / 10)  # truncation kept exactly as original
    result = 0
    for position, digit in enumerate(digit_stack):
        # Validate while summing, mirroring the original's in-loop check.
        if digit not in (0, 1):
            print("only zero or one can be accepted as in binary values")
            exit()
        result += digit * (2 ** position)
    print("[MS ... LS]: ", result)
# main function
def main():
    # Entry point: delegate to the interactive conversion menu.
    choice()
if __name__ == "__main__":
    main()
noironetworks/neutron | neutron/tests/unit/privileged/agent/linux/test_netlink_lib.py | 2 | 16397 | # Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants
import testtools
from neutron.common import exceptions
from neutron.privileged.agent.linux import netlink_constants as nl_constants
from neutron.privileged.agent.linux import netlink_lib as nl_lib
from neutron.tests import base
# Canned IPv4 conntrack entries used as inputs across the test cases below.
# All three deliberately share zone 1 and the same src/dst pair.
FAKE_ICMP_ENTRY = {'ipversion': 4, 'protocol': 'icmp',
                   'type': '8', 'code': '0', 'id': 1234,
                   'src': '1.1.1.1', 'dst': '2.2.2.2', 'zone': 1}
FAKE_TCP_ENTRY = {'ipversion': 4, 'protocol': 'tcp',
                  'sport': 1, 'dport': 2,
                  'src': '1.1.1.1', 'dst': '2.2.2.2', 'zone': 1}
FAKE_UDP_ENTRY = {'ipversion': 4, 'protocol': 'udp',
                  'sport': 1, 'dport': 2,
                  'src': '1.1.1.1', 'dst': '2.2.2.2', 'zone': 1}
class NetlinkLibTestCase(base.BaseTestCase):
    """Unit tests for netlink_lib.ConntrackManager with mocked libnetfilter
    (nl_lib.nfct) and libc bindings.

    Fix applied: the UDP and TCP delete tests previously asserted the zone
    attribute against FAKE_ICMP_ENTRY['zone'] instead of their own fixture;
    they passed only because all fixtures share zone 1.
    """
    def setUp(self):
        super(NetlinkLibTestCase, self).setUp()
        # Replace the ctypes-backed libraries with mocks for every test.
        nl_lib.nfct = mock.Mock()
        nl_lib.libc = mock.Mock()
    def test_open_new_conntrack_handler_failed(self):
        # A NULL handle from nfct_open must raise and skip nfct_close.
        nl_lib.nfct.nfct_open.return_value = None
        with testtools.ExpectedException(exceptions.CTZoneExhaustedError):
            with nl_lib.ConntrackManager():
                nl_lib.nfct.nfct_open.assert_called_once_with()
        nl_lib.nfct.nfct_close.assert_not_called()
    def test_open_new_conntrack_handler_pass(self):
        with nl_lib.ConntrackManager():
            nl_lib.nfct.nfct_open.assert_called_once_with(
                nl_constants.CONNTRACK, nl_constants.NFNL_SUBSYS_CTNETLINK)
        nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open(
            nl_constants.CONNTRACK, nl_constants.NFNL_SUBSYS_CTNETLINK))
    def test_conntrack_list_entries(self):
        # list_entries must register a dump callback and issue NFCT_Q_DUMP.
        with nl_lib.ConntrackManager() as conntrack:
            nl_lib.nfct.nfct_open.assert_called_once_with(
                nl_constants.CONNTRACK, nl_constants.NFNL_SUBSYS_CTNETLINK)
            conntrack.list_entries()
            nl_lib.nfct.nfct_callback_register.assert_has_calls(
                [mock.call(nl_lib.nfct.nfct_open(), nl_constants.NFCT_T_ALL,
                           mock.ANY, None)])
            nl_lib.nfct.nfct_query.assert_called_once_with(
                nl_lib.nfct.nfct_open(
                    nl_constants.CONNTRACK,
                    nl_constants.NFNL_SUBSYS_CTNETLINK),
                nl_constants.NFCT_Q_DUMP,
                mock.ANY)
        nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open(
            nl_constants.CONNTRACK, nl_constants.NFNL_SUBSYS_CTNETLINK))
    def test_conntrack_new_failed(self):
        # A NULL conntrack object still gets passed to nfct_destroy.
        nl_lib.nfct.nfct_new.return_value = None
        with nl_lib.ConntrackManager() as conntrack:
            nl_lib.nfct.nfct_open.assert_called_once_with(
                nl_constants.CONNTRACK,
                nl_constants.NFNL_SUBSYS_CTNETLINK)
            conntrack.delete_entries([FAKE_ICMP_ENTRY])
            nl_lib.nfct.nfct_new.assert_called_once_with()
        nl_lib.nfct.nfct_destroy.assert_called_once_with(None)
        nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open(
            nl_constants.CONNTRACK,
            nl_constants.NFNL_SUBSYS_CTNETLINK))
    def test_conntrack_delete_icmp_entry(self):
        conntrack_filter = mock.Mock()
        nl_lib.nfct.nfct_new.return_value = conntrack_filter
        with nl_lib.ConntrackManager() as conntrack:
            nl_lib.nfct.nfct_open.assert_called_once_with(
                nl_constants.CONNTRACK,
                nl_constants.NFNL_SUBSYS_CTNETLINK)
            conntrack.delete_entries([FAKE_ICMP_ENTRY])
            # 8-bit attributes: L3/L4 protocol plus ICMP code/type.
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L3PROTO,
                          nl_constants.IPVERSION_SOCKET[4]),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L4PROTO,
                          constants.IP_PROTOCOL_MAP['icmp']),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ICMP_CODE,
                          int(FAKE_ICMP_ENTRY['code'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ICMP_TYPE,
                          int(FAKE_ICMP_ENTRY['type']))
            ]
            nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls,
                                                          any_order=True)
            # 16-bit attributes: ICMP id (network byte order) and zone.
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ICMP_ID,
                          nl_lib.libc.htons(FAKE_ICMP_ENTRY['id'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ZONE,
                          int(FAKE_ICMP_ENTRY['zone']))
            ]
            nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls,
                                                           any_order=True)
            # Raw attributes: packed source/destination addresses.
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_SRC,
                          conntrack._convert_text_to_binary(
                              FAKE_ICMP_ENTRY['src'], 4)
                          ),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_DST,
                          conntrack._convert_text_to_binary(
                              FAKE_ICMP_ENTRY['dst'], 4)
                          ),
            ]
            nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True)
        nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter)
        nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open(
            nl_constants.CONNTRACK,
            nl_constants.NFNL_SUBSYS_CTNETLINK))
    def test_conntrack_delete_udp_entry(self):
        conntrack_filter = mock.Mock()
        nl_lib.nfct.nfct_new.return_value = conntrack_filter
        with nl_lib.ConntrackManager() as conntrack:
            nl_lib.nfct.nfct_open.assert_called_once_with(
                nl_constants.CONNTRACK,
                nl_constants.NFNL_SUBSYS_CTNETLINK)
            conntrack.delete_entries([FAKE_UDP_ENTRY])
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L3PROTO,
                          nl_constants.IPVERSION_SOCKET[4]),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L4PROTO,
                          constants.IP_PROTOCOL_MAP['udp'])
            ]
            nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls,
                                                          any_order=True)
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_SRC,
                          nl_lib.libc.htons(FAKE_UDP_ENTRY['sport'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_DST,
                          nl_lib.libc.htons(FAKE_UDP_ENTRY['dport'])),
                # Fixed: assert against the UDP fixture's own zone (the
                # original referenced FAKE_ICMP_ENTRY here).
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ZONE,
                          int(FAKE_UDP_ENTRY['zone']))
            ]
            nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls,
                                                           any_order=True)
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_SRC,
                          conntrack._convert_text_to_binary(
                              FAKE_UDP_ENTRY['src'], 4)
                          ),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_DST,
                          conntrack._convert_text_to_binary(
                              FAKE_UDP_ENTRY['dst'], 4)
                          ),
            ]
            nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True)
        nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter)
        nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open(
            nl_constants.CONNTRACK,
            nl_constants.NFNL_SUBSYS_CTNETLINK))
    def test_conntrack_delete_tcp_entry(self):
        conntrack_filter = mock.Mock()
        nl_lib.nfct.nfct_new.return_value = conntrack_filter
        with nl_lib.ConntrackManager() as conntrack:
            nl_lib.nfct.nfct_open.assert_called_once_with(
                nl_constants.CONNTRACK,
                nl_constants.NFNL_SUBSYS_CTNETLINK)
            conntrack.delete_entries([FAKE_TCP_ENTRY])
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L3PROTO,
                          nl_constants.IPVERSION_SOCKET[4]),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L4PROTO,
                          constants.IP_PROTOCOL_MAP['tcp'])
            ]
            nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls,
                                                          any_order=True)
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_SRC,
                          nl_lib.libc.htons(FAKE_TCP_ENTRY['sport'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_DST,
                          nl_lib.libc.htons(FAKE_TCP_ENTRY['dport'])),
                # Fixed: assert against the TCP fixture's own zone (the
                # original referenced FAKE_ICMP_ENTRY here).
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ZONE,
                          int(FAKE_TCP_ENTRY['zone']))
            ]
            nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls,
                                                           any_order=True)
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_SRC,
                          conntrack._convert_text_to_binary(
                              FAKE_TCP_ENTRY['src'], 4)
                          ),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_DST,
                          conntrack._convert_text_to_binary(
                              FAKE_TCP_ENTRY['dst'], 4)
                          ),
            ]
            nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True)
        nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter)
        nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open(
            nl_constants.CONNTRACK,
            nl_constants.NFNL_SUBSYS_CTNETLINK))
    def test_conntrack_delete_entries(self):
        # Mixed batch: one ICMP, one TCP, one UDP entry in a single call.
        conntrack_filter = mock.Mock()
        nl_lib.nfct.nfct_new.return_value = conntrack_filter
        with nl_lib.ConntrackManager() as conntrack:
            nl_lib.nfct.nfct_open.assert_called_once_with(
                nl_constants.CONNTRACK,
                nl_constants.NFNL_SUBSYS_CTNETLINK)
            conntrack.delete_entries([FAKE_ICMP_ENTRY,
                                      FAKE_TCP_ENTRY,
                                      FAKE_UDP_ENTRY])
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L3PROTO,
                          nl_constants.IPVERSION_SOCKET[4]),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L4PROTO,
                          constants.IP_PROTOCOL_MAP['tcp']),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L3PROTO,
                          nl_constants.IPVERSION_SOCKET[4]),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L4PROTO,
                          constants.IP_PROTOCOL_MAP['udp']),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L3PROTO,
                          nl_constants.IPVERSION_SOCKET[4]),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_L4PROTO,
                          constants.IP_PROTOCOL_MAP['icmp']),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ICMP_CODE,
                          int(FAKE_ICMP_ENTRY['code'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ICMP_TYPE,
                          int(FAKE_ICMP_ENTRY['type']))
            ]
            nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls,
                                                          any_order=True)
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_SRC,
                          nl_lib.libc.htons(FAKE_TCP_ENTRY['sport'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_DST,
                          nl_lib.libc.htons(FAKE_TCP_ENTRY['dport'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ZONE,
                          int(FAKE_TCP_ENTRY['zone'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_SRC,
                          nl_lib.libc.htons(FAKE_UDP_ENTRY['sport'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_PORT_DST,
                          nl_lib.libc.htons(FAKE_UDP_ENTRY['dport'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ZONE,
                          int(FAKE_UDP_ENTRY['zone'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ICMP_ID,
                          nl_lib.libc.htons(FAKE_ICMP_ENTRY['id'])),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_ZONE,
                          int(FAKE_ICMP_ENTRY['zone']))
            ]
            nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls,
                                                           any_order=True)
            calls = [
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_SRC,
                          conntrack._convert_text_to_binary(
                              FAKE_TCP_ENTRY['src'], 4)
                          ),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_DST,
                          conntrack._convert_text_to_binary(
                              FAKE_TCP_ENTRY['dst'], 4)),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_SRC,
                          conntrack._convert_text_to_binary(
                              FAKE_UDP_ENTRY['src'], 4)
                          ),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_DST,
                          conntrack._convert_text_to_binary(
                              FAKE_UDP_ENTRY['dst'], 4)
                          ),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_SRC,
                          conntrack._convert_text_to_binary(
                              FAKE_ICMP_ENTRY['src'], 4)
                          ),
                mock.call(conntrack_filter,
                          nl_constants.ATTR_IPV4_DST,
                          conntrack._convert_text_to_binary(
                              FAKE_ICMP_ENTRY['dst'], 4)
                          ),
            ]
            nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True)
        nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter)
        nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open(
            nl_constants.CONNTRACK,
            nl_constants.NFNL_SUBSYS_CTNETLINK))
| apache-2.0 |
kkoci/orthosie | register/models.py | 1 | 10309 | # Copyright 2013 Jack David Baucum
#
# This file is part of Orthosie.
#
# Orthosie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Orthosie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Orthosie. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.utils import timezone
from django.conf import settings
from decimal import Decimal
import time
from django.core.exceptions import ObjectDoesNotExist
class Shift(models.Model):
    """A register shift: a span of time during which transactions occur."""
    # auto_now stamps begin_date on every save; an open shift has a NULL
    # finish_date.
    begin_date = models.DateTimeField(auto_now=True)
    finish_date = models.DateTimeField(null=True)
    def __unicode__(self):
        return str(self.begin_date) + ' to ' + str(self.finish_date)
    def end_shift(self):
        # Idempotent: a second call on a finished shift is a no-op. Prints
        # the Z report before stamping the end time.
        if self.finish_date is None:
            self.print_z_report()
            self.finish_date = timezone.now()
            self.save()
        return
    def create_transaction(self):
        # Only an open shift may start transactions; returns None otherwise.
        if self.finish_date is None:
            return self.transaction_set.create(begin_date=timezone.now())
    @staticmethod
    def get_current():
        # Fetch the open shift, creating one if none exists.
        # NOTE(review): .get() assumes at most one open shift; a second open
        # row would raise MultipleObjectsReturned — confirm invariant.
        try:
            current_shift = Shift.objects.get(finish_date=None)
        except ObjectDoesNotExist:
            current_shift = Shift()
            current_shift.save()
        return current_shift
    def get_totals(self):
        """Aggregate sub-total, tax, total and transaction count over the
        shift's transactions, returned as a ShiftTotal."""
        sub_total = Decimal(0.0)
        tax = Decimal(0.0)
        total = Decimal(0.0)
        transaction_count = 0
        for transaction in self.transaction_set.all():
            transaction_count += 1
            totals = transaction.get_totals()
            sub_total = sub_total + totals.sub_total
            tax = tax + totals.tax_total
            total = sub_total + tax
        shift_total = ShiftTotal(sub_total, tax, total, transaction_count)
        return shift_total
    def print_z_report(self):
        # End-of-shift summary sent to the receipt printer.
        z = ZReport(self)
        z.print_out()
    class Meta:
        ordering = ['begin_date']
class Transaction(models.Model):
    """A single sale within a shift, made of line items and tenders."""
    shift = models.ForeignKey(Shift, on_delete=models.CASCADE)
    begin_date = models.DateTimeField()
    finish_date = models.DateTimeField(null=True)
    status = models.CharField(max_length=10, default='Started')
    def __unicode__(self):
        return str(self.begin_date) + ' to ' + str(self.finish_date)
    def end_transaction(self):
        # Print the receipt, then stamp the end time even if printing failed
        # (finally), re-raising PrinterNotFound to the caller.
        if self.finish_date is None:
            try:
                self.print_receipt()
            except PrinterNotFound:
                raise
            finally:
                self.finish_date = timezone.now()
                self.save()
    @staticmethod
    def get_current():
        # Fetch the open transaction, or start one on the current shift.
        try:
            current_transaction = Transaction.objects.get(finish_date=None)
        except ObjectDoesNotExist:
            current_transaction = Shift.get_current().create_transaction()
        return current_transaction
    def print_receipt(self):
        r = Receipt(self)
        r.print_out()
    def create_line_item(self, item, quantity, scale=None):
        """Add a line for *item*; grocery items expose .upc/.vendor, produce
        items expose .plu/.size (distinguished via AttributeError)."""
        if self.finish_date is None:
            try:
                code = item.upc
                description = item.vendor.name + ' ' + item.name
            except AttributeError:
                code = item.plu
                description = (item.size + ' ' + item.name) if item.size\
                    else item.name
            return self.lineitem_set.create(
                item=item,
                quantity=quantity,
                code=code,
                scale=scale,
                description=description,
                price=item.price
            )
    def create_tender(self, amount, transaction_type):
        """Record a payment; closes the transaction once the balance due
        drops to zero or below. Unknown types are silently ignored."""
        if transaction_type in ('CASH', 'CHECK', 'CREDIT', 'EBT'):
            tender = self.tender_set.create(
                amount=amount,
                type=transaction_type
            )
            if self.get_totals().total <= 0:
                self.end_transaction()
            return tender
    def get_totals(self):
        """Return a TransactionTotal of active-line total, tax and payments.

        NOTE(review): sums line_item.price without multiplying by quantity
        (cf. LineItem.total()) — confirm whether price here is already the
        extended line price. Tax rate is hardcoded at 7%.
        """
        total = Decimal(0.0)
        tax = Decimal(0.0)
        for line_item in self.lineitem_set.all():
            if line_item.status == 'ACTIVE':
                total = total + line_item.price
                if line_item.item.taxable:
                    tax = Decimal(tax) + line_item.price * Decimal('.07')
        # Round tax to cents and strip trailing zeros.
        tax = tax.quantize(Decimal(10) ** -2).normalize()
        paid_total = 0
        for tender in self.tender_set.all():
            paid_total = paid_total + tender.amount
        transaction_total = TransactionTotal(total, tax, paid_total)
        return transaction_total
    def cancel(self):
        # Void the transaction and every line item on it.
        self.status = 'CANCELED'
        self.end_transaction()
        for line_item in self.lineitem_set.all():
            line_item.cancel()
            line_item.save()
    class Meta:
        ordering = ['begin_date']
class LineItem(models.Model):
    """One scanned or keyed item on a Transaction."""

    transaction = models.ForeignKey(Transaction, on_delete=models.CASCADE)
    code = models.CharField(max_length=30)
    quantity = models.DecimalField(max_digits=15, decimal_places=0)
    # Weight for scale-priced items; NULL for unit-priced items.
    scale = models.DecimalField(max_digits=19, decimal_places=4, null=True)
    description = models.CharField(max_length=100)
    price = models.DecimalField(max_digits=17, decimal_places=2)
    item = models.ForeignKey('inventory.Item', on_delete=models.CASCADE)
    status = models.CharField(max_length=8, default='ACTIVE')

    def __unicode__(self):
        # Bug fix: the original concatenated self.description twice
        # ("... + self.description + ' ' + self.description").
        return str(self.scale) + ' x ' + self.description

    def total(self):
        """Extended price: unit price times quantity."""
        return self.price * self.quantity

    def cancel(self):
        """Soft-delete; totals skip non-ACTIVE items.  Caller must save()."""
        self.status = 'INACTIVE'
class Tender(models.Model):
    """A payment applied to a Transaction."""

    transaction = models.ForeignKey(Transaction, on_delete=models.CASCADE)
    amount = models.DecimalField(max_digits=17, decimal_places=2)
    # One of 'CASH', 'CHECK', 'CREDIT', 'EBT' -- enforced by
    # Transaction.create_tender, not at the database level.
    type = models.CharField(max_length=30)
class TransactionTotal:
    """Value object holding one transaction's totals.

    ``total`` is the outstanding balance: sub-total plus tax minus what
    has already been paid (negative when overpaid).
    """

    def __init__(self, sub_total, tax_total, paid_total):
        amount_owed = sub_total + tax_total
        self.sub_total = sub_total
        self.tax_total = tax_total
        self.paid_total = paid_total
        self.total = amount_owed - paid_total
class ShiftTotal:
    """Value object holding one shift's aggregated totals."""

    def __init__(self, sub_total, tax_total, total, transaction_count):
        # Plain attribute capture -- no derived values here, unlike
        # TransactionTotal; the caller supplies the grand total.
        self.sub_total = sub_total
        self.tax_total = tax_total
        self.total = total
        self.transaction_count = transaction_count
class Receipt:
    """Formats and prints a customer receipt for one transaction.

    Note: the constructor opens the printer immediately, so construction
    raises PrinterNotFound when the spool is unavailable.  ``lines`` is
    stored but currently unused.
    """

    def __init__(self, transaction, lines=None):
        self.transaction = transaction
        self.header = settings.RECEIPT_HEADER
        self.footer = settings.RECEIPT_FOOTER
        self.lines = lines
        self.printer = Printer(settings.PRINTER)
        self.printer.open()

    def print_out(self):
        """Print header, body and footer, then kick the drawer and cut."""
        self.print_header()
        self.print_body()
        self.print_footer()
        self.printer.kick_drawer()
        self.printer.cut()
        self.printer.close()

    def print_header(self):
        # Fix: use the header captured in __init__ -- the original re-read
        # settings.RECEIPT_HEADER here, leaving self.header dead.
        self.printer.print_line('\n'.join(self.header))
        self.printer.print_line(
            time.strftime('%Y-%m-%d %H:%M:%S') + '\n' + '\n'
        )

    def print_footer(self):
        # Same fix as print_header: self.footer instead of settings.
        self.printer.print_line('\n'.join(self.footer))

    def print_body(self):
        """Print one line per line item, then the totals block."""
        trans_totals = self.transaction.get_totals()
        for line_item in self.transaction.lineitem_set.all():
            # Columns: quantity (4), description (38), price (13), tax flag.
            self.printer.print_line(
                str(line_item.quantity).ljust(4) +
                line_item.description.ljust(38)[:38] +
                "{:13,.2f}".format(line_item.price) +
                (line_item.item.taxable and 'T' or ' ') + '\n'
            )
        self.printer.print_line('\n')
        self.printer.print_line(
            'SubTotal: ' + "{:16,.2f}".format(trans_totals.sub_total) +
            ' Tax: ' + "{:23,.2f}".format(trans_totals.tax_total) + '\n'
        )
        # NOTE(review): 'Change' prints the outstanding balance, which is
        # negative when the customer overpaid -- confirm intended output.
        self.printer.print_line(
            'Total: ' + "{:19,.2f}".format(trans_totals.sub_total +
                                           trans_totals.tax_total) +
            ' Change: ' +
            "{:20,.2f}".format(
                trans_totals.total) +
            '\n\n'
        )
class ZReport:
    """End-of-shift summary report sent to the receipt printer."""

    def __init__(self, shift):
        # The printer is opened eagerly, so constructing a ZReport raises
        # PrinterNotFound when the spool is unavailable.
        self.shift = shift
        self.printer = Printer(settings.PRINTER)
        self.printer.open()

    def print_out(self):
        """Print the shift totals, then kick the drawer and cut the tape."""
        totals = self.shift.get_totals()
        summary_lines = [
            'Transactions: ' + str(totals.transaction_count),
            'SubTotal: ' + str(totals.sub_total),
            'TaxTotal: ' + str(totals.tax_total),
            'Total: ' + str(totals.total),
        ]
        for line in summary_lines:
            self.printer.print_line(line + '\n')
        self.printer.kick_drawer()
        self.printer.cut()
        self.printer.close()
class Printer:
    """Thin wrapper around a spool-file receipt printer."""

    def __init__(self, spool):
        # Path of the spool device/file; opened lazily by open().
        self.spool = spool

    def open(self):
        """Open the spool file, raising PrinterNotFound if unavailable.

        The original tried to alias FileNotFoundError to IOError for
        Python 2, but assigning the name inside the function made the
        probe raise UnboundLocalError (a NameError subclass), so the
        broad IOError alias was always used anyway.  Catching IOError
        directly preserves that effective behaviour on both Python 2
        and Python 3 (where IOError is OSError).
        """
        try:
            self._printer = open(self.spool, 'w')
        except IOError:
            raise PrinterNotFound(
                'Unable to locate printer "' + self.spool + '".'
            )

    def close(self):
        self._printer.close()

    def print_line(self, line):
        self._printer.write(line)

    def cut(self):
        # Feed eight blank lines so the printed text clears the blade,
        # then send the cut control sequence (ESC 'i' LF).
        for i in range(8):
            self.print_line('\n')
        self._printer.write(chr(27) + chr(105) + chr(10))

    def kick_drawer(self):
        # Cash-drawer kick control sequence (ESC 'p' ...).
        self._printer.write(
            chr(27) + chr(112) + chr(0) + chr(48) + '0' + chr(10)
        )
class PrinterNotFound(Exception):
    """Raised when the configured printer spool cannot be opened."""

    def __init__(self, value):
        # Pass the message to Exception so args/repr/pickling behave
        # normally (the original left args empty); keep .value for
        # existing callers.
        super(PrinterNotFound, self).__init__(value)
        self.value = value

    def __str__(self):
        return self.value
| gpl-3.0 |
seansu4you87/kupo | projects/molt/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/sanitizer.py | 180 | 17804 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from six.moves import urllib_parse as urlparse
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
content_type_rgx = re.compile(r'''
^
# Match a content type <application>/<type>
(?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+)
# Match any character set and encoding
(?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?)
|(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?)
# Assume the rest is data
,.*
$
''',
re.VERBOSE)
class HTMLSanitizerMixin(object):
    """ sanitization of XHTML+MathML+SVG and of inline style attributes."""

    # NOTE(review): this is vendored html5lib code -- the whitelists below
    # drive the sanitizer: elements not listed are escaped to text,
    # attributes/CSS/protocols not listed are dropped.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']

    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
        'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
        'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
        'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none']

    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
        'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
        'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
        'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
        'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
        'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
        'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
        'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
        'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
        'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
        'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
        'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
        'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
        'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
        'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
        'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
        'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
        'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
        'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
        'width', 'wrap', 'xml:lang']

    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
        'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
        'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
        'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
        'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
        'xlink:type', 'xmlns', 'xmlns:xlink']

    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
        'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
        'fill-opacity', 'fill-rule', 'font-family', 'font-size',
        'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
        'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
        'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
        'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
        'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
        'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
        'opacity', 'orient', 'origin', 'overline-position',
        'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
        'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
        'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
        'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
        'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
        'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
        'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
        'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
        'transform', 'type', 'u1', 'u2', 'underline-position',
        'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
        'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
        'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
        'y1', 'y2', 'zoomAndPan']

    # Attributes whose values are URIs and therefore need protocol vetting.
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster', 'background', 'datasrc',
        'dynsrc', 'lowsrc', 'ping', 'poster', 'xlink:href', 'xml:base']

    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
        'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
        'mask', 'stroke']

    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
        'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
        'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
        'set', 'use']

    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']

    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']

    acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']

    acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
        'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
        'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
        'ssh', 'sftp', 'rtsp', 'afs', 'data']

    acceptable_content_types = ['image/png', 'image/jpeg', 'image/gif', 'image/webp', 'image/bmp', 'text/plain']

    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols
    allowed_content_types = acceptable_content_types

    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, # specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff()&lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Sanitize one tokenizer token.

        Returns the (possibly rewritten) token, or None for tokens that
        are removed outright (comments).
        """

        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in list(tokenTypes.keys()):
            token_type = tokenTypes[token_type]

        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                return self.allowed_token(token, token_type)
            else:
                return self.disallowed_token(token, token_type)
        elif token_type == tokenTypes["Comment"]:
            # Comments are dropped entirely (implicit None return).
            pass
        else:
            return token

    def allowed_token(self, token, token_type):
        """Filter a whitelisted element's attributes, URIs and styles."""
        if "data" in token:
            # Reversed so the FIRST occurrence of a duplicated attribute
            # wins when the pairs are folded into a dict.
            attrs = dict([(name, val) for name, val in
                          token["data"][::-1]
                          if name in self.allowed_attributes])
            for attr in self.attr_val_is_uri:
                if attr not in attrs:
                    continue
                # Strip control/whitespace characters that could be used
                # to smuggle a scheme past the check, then lowercase.
                val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                       unescape(attrs[attr])).lower()
                # remove replacement characters from unescaped characters
                val_unescaped = val_unescaped.replace("\ufffd", "")
                try:
                    uri = urlparse.urlparse(val_unescaped)
                except ValueError:
                    # Unparseable URI: drop the attribute defensively.
                    uri = None
                    del attrs[attr]
                if uri and uri.scheme:
                    if uri.scheme not in self.allowed_protocols:
                        del attrs[attr]
                    if uri.scheme == 'data':
                        m = content_type_rgx.match(uri.path)
                        if not m:
                            del attrs[attr]
                        elif m.group('content_type') not in self.allowed_content_types:
                            del attrs[attr]

            for attr in self.svg_attr_val_allows_ref:
                if attr in attrs:
                    # Strip url(...) references to non-local resources.
                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                         ' ',
                                         unescape(attrs[attr]))
            if (token["name"] in self.svg_allow_local_href and
                'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                    attrs['xlink:href'])):
                del attrs['xlink:href']
            if 'style' in attrs:
                attrs['style'] = self.sanitize_css(attrs['style'])
            token["data"] = [[name, val] for name, val in list(attrs.items())]
        return token

    def disallowed_token(self, token, token_type):
        """Escape a non-whitelisted tag so it renders as literal text."""
        if token_type == tokenTypes["EndTag"]:
            token["data"] = "</%s>" % token["name"]
        elif token["data"]:
            attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
            token["data"] = "<%s%s>" % (token["name"], attrs)
        else:
            token["data"] = "<%s>" % token["name"]
        if token.get("selfClosing"):
            token["data"] = token["data"][:-1] + "/>"

        if token["type"] in list(tokenTypes.keys()):
            token["type"] = "Characters"
        else:
            token["type"] = tokenTypes["Characters"]

        del token["name"]
        return token

    def sanitize_css(self, style):
        """Return *style* with only whitelisted properties/keywords kept."""
        # disallow urls
        style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)

        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''

        clean = []
        for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                'padding']:
                # Shorthand properties: every keyword must be acceptable
                # (for-else appends only when no keyword failed).
                for keyword in value.split():
                    if keyword not in self.acceptable_css_keywords and \
                            not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that sanitizes each token as it is emitted."""

    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
        # Change case matching defaults as we only output lowercase html anyway
        # This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName, parser=parser)

    def __iter__(self):
        # Tokens removed by sanitize_token() (it returns None for
        # comments) are filtered out of the stream entirely.
        for token in HTMLTokenizer.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token
| mit |
giruenf/GRIPy | algo/rockphysics/ElasticParameters.py | 1 | 1768 | from Algo import KusterToksoz as KT
from numpy import sqrt
def Misat(Vs, Dens):
    """Saturated shear modulus: mu = rho * Vs^2."""
    shear_modulus = (Vs * Vs) * Dens
    return shear_modulus
def Ksat(Vp, Vs, Dens):
    """Saturated bulk modulus: K = rho * (Vp^2 - (4/3) Vs^2)."""
    moduli_term = Vp * Vp - 4.0 * Vs * Vs / 3.0
    return Dens * moduli_term
def Dens(Densm, Densf, phi):
    """Bulk density: porosity-weighted mix of matrix and fluid densities."""
    matrix_part = Densm * (1 - phi)
    fluid_part = Densf * phi
    return matrix_part + fluid_part
def Kdry(Ksat, Kmin, Kfl, phi):
    """Dry-rock bulk modulus from the saturated/mineral/fluid moduli."""
    gamma = phi * (Kmin / Kfl - 1.0)
    numerator = Ksat * (gamma + 1.0) - Kmin
    denominator = gamma - 1.0 + Ksat / Kmin
    return numerator / denominator
def pq_Keys_Xu(Km, Gm, alpha):
    """p and q exponents for the Keys-Xu dry-rock approximation.

    Delegates to the Kuster-Toksoz T and F terms for pore aspect
    ratio *alpha*.
    """
    return KT.T(Km, Gm, alpha), KT.F(Km, Gm, alpha)
def Keys_Xu(Km, Gm, alpha, phi):
    """Keys-Xu dry-rock moduli: Kd = Km (1-phi)^p, Gd = Gm (1-phi)^q."""
    p, q = pq_Keys_Xu(Km, Gm, alpha)
    solid_fraction = 1.0 - phi
    Kd = Km * solid_fraction ** p
    Gd = Gm * solid_fraction ** q
    return Kd, Gd
def XW_dry(Km, Gm, alpha, phi, curphi=0.0):
    """Dry-rock bulk and shear moduli via iterative (DEM-style)
    application of the Kuster-Toksoz model, adding porosity in small
    increments starting from *curphi*.
    """
    _PFACTOR = 10
    # Number of iterations used to build the porosity up in increments
    # (comments translated from Portuguese).
    nphi = int(phi * _PFACTOR / alpha + 0.5)
    if not nphi:
        nphi = 1
    dphi = phi / nphi  # porosity step
    K_ = Km
    G_ = Gm
    Kd = Km
    Gd = Gm
    # DEM scheme computing the dry-rock bulk and shear moduli.
    for i in range(nphi):
        # The porosity is incremented one dphi step at a time; each step
        # is rescaled by the solid fraction remaining so far.
        Kd = KT.Kd(K_, G_, alpha,
                   dphi / (1.0 - curphi - i * dphi))
        Gd = KT.Gd(K_, G_, alpha, dphi / (1.0 - curphi - i * dphi))
        K_ = Kd
        G_ = Gd
    return Kd, Gd
# def Vp_kx(Km, Gm, phi, alpha, Kfl, rho):
# p, q = pq_Keys_Xu(Km, Gm, alpha)
# Kd, G = Keys_Xu(Km, Gm, phi, p, q)
# Ks = Gassmann(Kd, Km, Kfl, phi)
# Vp = sqrt((Ks + 4.0*G/3.0)/rho)
# return Vp
def Vp(Ks, G, rho):
    """P-wave velocity from bulk modulus, shear modulus and density."""
    m_pwave = Ks + 4.0 * G / 3.0
    return sqrt(m_pwave / rho)
def Vs(G, rho):
    """S-wave velocity from shear modulus and density."""
    modulus_density_ratio = G / rho
    return modulus_density_ratio ** 0.5
| apache-2.0 |
PanYuntao/node-gyp | gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
# Placeholder -> replacement mapping applied to every path fixed up by
# FixFilenames(); populated from "key=value" command-line args in main().
REPLACEMENTS = dict()
# Raw argv list; stored globally so AbsoluteNode() can find the
# directory of the .vcproj being processed.
ARGUMENTS = None
class CmpTuple(object):
    """Compare function between 2 tuple."""

    def __call__(self, x, y):
        # Python 2 comparator protocol (used via list.sort(cmp=...));
        # orders tuples by their first element only.
        return cmp(x[0], y[0])
class CmpNode(object):
    """Compare function between 2 xml nodes."""

    def __call__(self, x, y):
        def get_string(node):
            # Build a canonical sort key for the node: tag name, node
            # value, the 'Name' attribute, then all attributes sorted
            # by attribute name.
            node_string = "node"
            node_string += node.nodeName
            if node.nodeValue:
                node_string += node.nodeValue

            if node.attributes:
                # We first sort by name, if present.
                node_string += node.getAttribute("Name")

                all_nodes = []
                for (name, value) in node.attributes.items():
                    all_nodes.append((name, value))
                all_nodes.sort(CmpTuple())
                for (name, value) in all_nodes:
                    node_string += name
                    node_string += value

            return node_string

        # Python 2 comparator protocol (list.sort(cmp=...)).
        return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
    """Print *node* and its subtree to stdout as indented XML
    (two extra spaces per nesting level; attributes one per line).
    """
    if node.nodeType == Node.TEXT_NODE:
        if node.data.strip():
            print '%s%s' % (' '*indent, node.data.strip())
        return

    if node.childNodes:
        node.normalize()

    # Get the number of attributes
    attr_count = 0
    if node.attributes:
        attr_count = node.attributes.length

    # Print the main tag
    if attr_count == 0:
        print '%s<%s>' % (' '*indent, node.nodeName)
    else:
        print '%s<%s' % (' '*indent, node.nodeName)

        all_attributes = []
        for (name, value) in node.attributes.items():
            all_attributes.append((name, value))
        # Attributes are emitted in sorted order for stable output.
        all_attributes.sort(CmpTuple())
        for (name, value) in all_attributes:
            print '%s  %s="%s"' % (' '*indent, name, value)
        print '%s>' % (' '*indent)
    if node.nodeValue:
        print '%s  %s' % (' '*indent, node.nodeValue)

    for sub_node in node.childNodes:
        PrettyPrintNode(sub_node, indent=indent+2)
    print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
    """Return *node*'s children with any nested Filter nodes inlined.

    The special "_excluded_files" filter is dropped entirely.
    """
    if (node.attributes and
        node.getAttribute('Name') == '_excluded_files'):
        return []

    flattened = []
    for child in node.childNodes:
        if child.nodeName == 'Filter':
            flattened.extend(FlattenFilter(child))
        else:
            flattened.append(child)
    return flattened
def FixFilenames(filenames, current_directory):
    """Apply REPLACEMENTS, strip quotes, and absolutize each filename.

    Empty entries are skipped; names starting with '$' (MSVC macros)
    are kept as-is.  Paths are resolved relative to *current_directory*
    (via os.chdir, as in the original).
    """
    fixed = []
    for name in filenames:
        if not name:
            continue
        for placeholder in REPLACEMENTS:
            name = name.replace(placeholder, REPLACEMENTS[placeholder])
        os.chdir(current_directory)
        name = name.strip('"\' ')
        if name.startswith('$'):
            fixed.append(name)
        else:
            fixed.append(os.path.abspath(name))
    return fixed
def AbsoluteNode(node):
    """Makes all the properties we know about in this node absolute."""
    if node.attributes:
        # NOTE(review): removeAttribute is called while iterating
        # items(); this relies on items() returning a snapshot list
        # (minidom does) -- confirm before porting to another DOM.
        for (name, value) in node.attributes.items():
            if name in ['InheritedPropertySheets', 'RelativePath',
                        'AdditionalIncludeDirectories',
                        'IntermediateDirectory', 'OutputDirectory',
                        'AdditionalLibraryDirectories']:
                # We want to fix up these paths
                path_list = value.split(';')
                new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
                node.setAttribute(name, ';'.join(new_list))
            if not value:
                node.removeAttribute(name)
def CleanupVcproj(node):
    """Recursively normalize, sort and dedupe the subtree at *node*.

    For each sub node, we call recursively this function.
    """
    for sub_node in node.childNodes:
        AbsoluteNode(sub_node)
        CleanupVcproj(sub_node)

    # Normalize the node, and remove all extranous whitespaces.
    for sub_node in node.childNodes:
        if sub_node.nodeType == Node.TEXT_NODE:
            sub_node.data = sub_node.data.replace("\r", "")
            sub_node.data = sub_node.data.replace("\n", "")
            sub_node.data = sub_node.data.rstrip()

    # Fix all the semicolon separated attributes to be sorted, and we also
    # remove the dups.
    if node.attributes:
        for (name, value) in node.attributes.items():
            sorted_list = sorted(value.split(';'))
            unique_list = []
            # Linear-scan dedupe; fine for short attribute value lists.
            for i in sorted_list:
                if not unique_list.count(i):
                    unique_list.append(i)
            node.setAttribute(name, ';'.join(unique_list))
            if not value:
                node.removeAttribute(name)

    if node.childNodes:
        node.normalize()

    # For each node, take a copy, and remove it from the list.
    node_array = []
    while node.childNodes and node.childNodes[0]:
        # Take a copy of the node and remove it from the list.
        current = node.childNodes[0]
        node.removeChild(current)

        # If the child is a filter, we want to append all its children
        # to this same list.
        if current.nodeName == 'Filter':
            node_array.extend(FlattenFilter(current))
        else:
            node_array.append(current)

    # Sort the list.
    node_array.sort(CmpNode())

    # Insert the nodes in the correct order.
    for new_node in node_array:
        # But don't append empty tool node.
        if new_node.nodeName == 'Tool':
            if new_node.attributes and new_node.attributes.length == 1:
                # This one was empty.
                continue
        if new_node.nodeName == 'UserMacro':
            continue
        node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
    # TODO(nsylvain): Find a better way to navigate the xml.
    """Return every Configuration node under the Configurations element.

    (Function name keeps the original's 'Configuation' spelling, since
    it is part of the public interface.)
    """
    found = []
    for child in vcproj.childNodes:
        if child.nodeName != "Configurations":
            continue
        for grandchild in child.childNodes:
            if grandchild.nodeName == "Configuration":
                found.append(grandchild)
    return found
def GetChildrenVsprops(filename):
    """Return absolute paths of property sheets inherited by *filename*."""
    dom = parse(filename)
    root = dom.documentElement
    if not root.attributes:
        return []
    vsprops = root.getAttribute('InheritedPropertySheets')
    return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
def SeekToNode(node1, child2):
    """Find node1's child matching child2's tag and Name, or None."""
    # A text node does not have properties.
    if child2.nodeType == Node.TEXT_NODE:
        return None

    # Without a Name attribute there is nothing to match on.
    wanted_name = child2.getAttribute("Name")
    if not wanted_name:
        return None

    for candidate in node1.childNodes:
        if (candidate.nodeName == child2.nodeName and
                candidate.getAttribute("Name") == wanted_name):
            return candidate

    # No match. We give up.
    return None
def MergeAttributes(node1, node2):
    """Merge node2's attributes into node1.

    Differing values are concatenated with ';'; the identifying 'Name'
    attribute is never merged, and 'InheritedPropertySheets' is removed
    afterwards since the sheets have already been expanded.
    """
    if not node2.attributes:
        return

    for name, incoming in node2.attributes.items():
        if name == 'Name':
            continue
        existing = node1.getAttribute(name)
        if not existing:
            node1.setAttribute(name, incoming)
        elif existing != incoming:
            node1.setAttribute(name, ';'.join([existing, incoming]))
        if name == 'InheritedPropertySheets':
            node1.removeAttribute(name)
def MergeProperties(node1, node2):
    """Recursively merge node2's attributes and children into node1."""
    MergeAttributes(node1, node2)
    for child2 in node2.childNodes:
        match = SeekToNode(node1, child2)
        if match is not None:
            MergeProperties(match, child2)
        else:
            # No matching child: adopt a deep copy of node2's child.
            node1.appendChild(child2.cloneNode(True))
def main(argv):
    """Main function of this vcproj prettifier.

    argv: [script, vcproj_path, key1=value1, ...]; returns a process
    exit code (0 on success, 1 on usage error).
    """
    global ARGUMENTS
    ARGUMENTS = argv

    # check if we have exactly 1 parameter.
    if len(argv) < 2:
        print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
               '[key2=value2]' % argv[0])
        return 1

    # Parse the keys
    for i in range(2, len(argv)):
        (key, value) = argv[i].split('=')
        REPLACEMENTS[key] = value

    # Open the vcproj and parse the xml.
    dom = parse(argv[1])

    # First thing we need to do is find the Configuration Node and merge them
    # with the vsprops they include.
    for configuration_node in GetConfiguationNodes(dom.documentElement):
        # Get the property sheets associated with this configuration.
        vsprops = configuration_node.getAttribute('InheritedPropertySheets')

        # Fix the filenames to be absolute.
        vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                    os.path.dirname(argv[1]))

        # Extend the list of vsprops with all vsprops contained in the current
        # vsprops.  (The list is deliberately extended while iterating so
        # transitively-included sheets are also visited.)
        for current_vsprops in vsprops_list:
            vsprops_list.extend(GetChildrenVsprops(current_vsprops))

        # Now that we have all the vsprops, we need to merge them.
        for current_vsprops in vsprops_list:
            MergeProperties(configuration_node,
                            parse(current_vsprops).documentElement)

    # Now that everything is merged, we need to cleanup the xml.
    CleanupVcproj(dom.documentElement)

    # Finally, we use the prett xml function to print the vcproj back to the
    # user.
    #print dom.toprettyxml(newl="\n")
    PrettyPrintNode(dom.documentElement)
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
erbridge/NQr | src/util.py | 1 | 16879 | # Utility functions and classes
import configparser
import copy
import datetime
import logging
import os.path
import platform
import queue
import threading
import traceback
#import wxversion
# wxversion.select([x for x in wxversion.getInstalled()
# if x.find('unicode') != -1])
#import wx
import errors
import events
wx = events.wx
# Application version string, reported by the UI / update check stubs.
VERSION_NUMBER = "0.1"

# Platform detection: SYSTEM_NAME is matched against the *_NAMES lists
# below (e.g. in getIsInstalled) to branch on the host OS.
SYSTEM_NAME = platform.system()
MAC_NAMES = ["Mac OS X", "Darwin"]
WINDOWS_NAMES = ["Windows"]
FREEBSD_NAMES = ["FreeBSD"]
LINUX_NAMES = ["Linux"]
def de_utf8(u):
    """Best-effort repair of mojibake: re-decode *u*'s bytes as UTF-8.

    Fixes strings whose UTF-8 bytes were mis-decoded as Latin-1 (e.g.
    '\xc3\xa9' -> 'e-acute').  Returns *u* unchanged when it cannot be
    repaired.  The original's Python 2 byte-join trick silently
    no-opped on Python 3 (str has no .decode), and its bare
    ``except: pass`` swallowed everything; this restores the intended
    behaviour with narrow exception handling.
    """
    try:
        return u.encode('latin-1').decode('utf-8')
    except (UnicodeError, AttributeError):
        # Not Latin-1 representable, not valid UTF-8, or not a str at
        # all -- give the caller back what it handed us.
        return u
def plural(count):
    """Return 's' unless *count* is exactly 1 (for pluralizing nouns)."""
    return '' if count == 1 else 's'
def formatLength(rawLength):
    """Format a track length in seconds as "m:ss" (e.g. 125 -> "2:05").

    Accepts ints or floats; fractional seconds are truncated.  The
    original tested ``seconds not in list(range(10))`` to decide on
    zero-padding; '%02d' does the same padding directly.
    """
    minutes = int(rawLength // 60)
    seconds = int(rawLength - minutes * 60)
    return '%d:%02d' % (minutes, seconds)
# def convertToUnicode(string, debugCompletion, logging=True):
# return unicode(string, "mbcs") # the rest is now possibly unnecessary?
# try:
# unicodeString = unicode(string, "cp1252")
# except UnicodeDecodeError:
# if logging:
# debugCompletion("Found bad characters. Attempting to resolve.")
# unicodeString = u""
# for char in string:
# try:
# unicodeString += unicode(char, "cp1252")
# except UnicodeDecodeError as err:
# errStr = str(err)
# startIndex = errStr.index("0x")
# endIndex = errStr.index(" ", startIndex)
# hexStr = ""
# for i in range(startIndex, endIndex):
# hexStr += errStr[i]
# unicodeString += unichr(int(hexStr, 16))
# if logging:
# debugCompletion("Bad characters resolved.")
# return unicodeString
def doNothing():
    """No-op placeholder used where a callable is required."""
    pass
# def extractTraceStack(trace=None):
# newTrace = traceback.extract_stack()[:-1]
# if trace is None:
# return newTrace
## for index in range(len(trace)):
## if trace[index] != newTrace[index]:
## return trace + newTrace[index:]
## return newTrace
# return trace + newTrace
def getTrace(maybeTraceCallbackOrList=None):
    """Build a stack-trace list for logging/debugging.

    Accepts a BaseCallback (its stored trace is returned, minus the
    frame for this call), a list of trace entries to prepend to the
    current stack, or None for just the current stack (again minus
    this frame).
    """
    # NOTE(review): BaseCallback is not imported in the visible portion
    # of this module -- presumably defined later in the file; confirm.
    if isinstance(maybeTraceCallbackOrList, BaseCallback):
        return maybeTraceCallbackOrList.getTrace()[:-1]
    trace = traceback.extract_stack()[:-1]
    if maybeTraceCallbackOrList is not None:
        return maybeTraceCallbackOrList + trace
    return trace
def validateNumeric(textCtrl):
    """Validate that a wx text control contains only digits.

    Pops a message box, turns the control pink and focuses it on
    failure; restores the system background colour on success.
    Returns True when valid (an empty control counts as valid).
    """
    text = textCtrl.GetValue()
    for char in text:
        if not char.isdigit():
            wx.MessageBox("Must be numeric only!", "Error")
            textCtrl.SetBackgroundColour("pink")
            textCtrl.SetFocus()
            textCtrl.Refresh()
            return False
    # NOTE(review): wx.SystemSettings_GetColour is the wxPython
    # "classic" API spelling; Phoenix spells it
    # wx.SystemSettings.GetColour -- confirm the targeted wx version.
    textCtrl.SetBackgroundColour(
        wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
    textCtrl.Refresh()
    return True
def validateDirectory(textCtrl):
    """Validate that a wx text control names an existing directory.

    Pops a message box, turns the control pink and focuses it on
    failure; restores the system background colour on success.
    Returns True when the path exists and is a directory.
    """
    text = textCtrl.GetValue()
    if not os.path.isdir(text):
        wx.MessageBox("Must be existing directory path!", "Error")
        textCtrl.SetBackgroundColour("pink")
        textCtrl.SetFocus()
        textCtrl.Refresh()
        return False
    textCtrl.SetBackgroundColour(
        wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
    textCtrl.Refresh()
    return True
def _doRough(time, bigDivider, bigName, littleDivider, littleName):
    """Render *time* as e.g. "2 hours 30 minutes".

    *time* is rounded to the nearest little unit; zero components are
    omitted, and an entirely-zero time yields the empty string.
    """
    rounded = time + littleDivider / 2
    big = int(rounded / littleDivider / bigDivider)
    little = int((rounded / littleDivider) % bigDivider)

    parts = []
    if big != 0:
        parts.append(str(big) + " " + bigName + plural(big))
    if little != 0:
        parts.append(str(little) + " " + littleName + plural(little))
    return " ".join(parts)
def roughAge(time):
    """
    Return a string roughly describing the time difference handed in.
    """
    # Named unit lengths in seconds, for readability.
    minute = 60
    hour = 60 * 60
    day = 24 * 60 * 60
    week = 7 * 24 * 60 * 60
    if time < hour:
        return _doRough(time, 60, "minute", 1, "second")
    if time < day:
        return _doRough(time, 60, "hour", minute, "minute")
    if time < week:
        return _doRough(time, 24, "day", hour, "hour")
    if time < 365 * day:
        return _doRough(time, 7, "week", day, "day")
    # A 52-week "year" is admittedly a crude approximation.
    return _doRough(time, 52, "year", week, "week")
# FIXME: Implement for other systems (maybe see:
# www.cyberciti.biz/faq/howto-display-list-of-all-installed-software/).
def getIsInstalled(softwareName):
    """Return True if `softwareName` appears to be installed.

    On Windows this enumerates the HKLM uninstall registry key via WMI and
    looks for an exact subkey-name match.  On every other platform it
    simply returns True (detection not implemented - see FIXME above).
    """
    if SYSTEM_NAME in WINDOWS_NAMES:
        import wmi
        import winreg
        # `result` is the WMI status code; `names` lists the subkeys of the
        # uninstall key (one per installed product).
        result, names = wmi.Registry().EnumKey(
            hDefKey=winreg.HKEY_LOCAL_MACHINE,
            sSubKeyName=r"Software\Microsoft\Windows\CurrentVersion\Uninstall")
        if softwareName in names:
            return True
        return False
    return True
# FIXME: Implement updating.
def getUpdate():
    """Updating is not implemented yet; always report no update available."""
    return None
def doUpdate():
    """Updating is not implemented yet; do nothing."""
    return None
def postEvent(lock, target, event):
    """Post `event` to `target`, serialising on `lock` when one is given.

    Raises errors.NoEventHandlerError when `target` is not a wx event
    handler; any other TypeError propagates unchanged.
    """
    try:
        if lock is None:
            wx.PostEvent(target, event)
        else:
            with lock:
                wx.PostEvent(target, event)
    except TypeError as err:
        expected = ("in method 'PostEvent', expected argument 1 of type"
                    + " 'wxEvtHandler *'")
        if str(err) != expected:
            raise
        raise errors.NoEventHandlerError
def postDebugLog(lock, target, logger, message):
    """Post a debug-level LogEvent; fall back to direct logging on failure."""
    # TODO: Concurrency issue?
    if not logger.isEnabledFor(logging.DEBUG):
        return
    try:
        event = events.LogEvent(logger, "debug", message)
        postEvent(lock, target, event)
    except errors.NoEventHandlerError:
        logger.debug("(post error)" + message)
def postInfoLog(lock, target, logger, message):
    """Post an info-level LogEvent; fall back to direct logging on failure."""
    try:
        event = events.LogEvent(logger, "info", message)
        postEvent(lock, target, event)
    except errors.NoEventHandlerError:
        logger.info("(post error)" + message)
def postErrorLog(lock, target, logger, message):
    """Post an error-level LogEvent; fall back to direct logging on failure."""
    try:
        event = events.LogEvent(logger, "error", message)
        postEvent(lock, target, event)
    except errors.NoEventHandlerError:
        logger.error("(post error)" + message)
def postWarningLog(lock, target, logger, message):
    """Post a warning-level LogEvent; fall back to direct logging on failure."""
    try:
        event = events.LogEvent(logger, "warning", message)
        postEvent(lock, target, event)
    except errors.NoEventHandlerError:
        logger.warning("(post error)" + message)
class EventPoster:
    """Mixin bundling the window/logger/lock triple used to post wx events."""

    def __init__(self, window, logger, lock):
        self._window = window
        self._logger = logger
        self._lock = lock

    def postEvent(self, event):
        """Post `event` to this poster's window."""
        postEvent(self._lock, self._window, event)

    def _postLog(self, poster, message):
        # All module-level log posters share the same signature.
        poster(self._lock, self._window, self._logger, message)

    def postDebugLog(self, message):
        self._postLog(postDebugLog, message)

    def postInfoLog(self, message):
        self._postLog(postInfoLog, message)

    def postErrorLog(self, message):
        self._postLog(postErrorLog, message)

    def postWarningLog(self, message):
        self._postLog(postWarningLog, message)
class RedirectText:
    """Tee writes to a real stream while mirroring them into a text control."""

    def __init__(self, out, sysout):
        # `sysout` is the original stream; `out` is the wx text control.
        self._out = sysout
        self._out2 = out

    def write(self, string):
        """Write to the stream, append to the control, keep any selection."""
        self._out.write(string)
        selection = self._out2.GetSelection()
        self._out2.AppendText(string)
        start, end = selection
        if start != end:
            # Appending clobbers the selection; restore the user's one.
            self._out2.SetSelection(start, end)
class RedirectErr(RedirectText):
    """RedirectText wired up for mirroring stderr into a text control."""

    def __init__(self, textCtrl, stderr):
        RedirectText.__init__(self, textCtrl, stderr)
class RedirectOut(RedirectText):
    """RedirectText wired up for mirroring stdout into a text control."""

    def __init__(self, textCtrl, stdout):
        RedirectText.__init__(self, textCtrl, stdout)
class BaseCallback:
    # FIXME: Catch all errors and re-raise with trace (possibly done?).
    """Pairs a completion callable with the stack trace of its creation.

    The stored trace lets errors raised later (possibly on another thread)
    be reported with the context in which the callback was created.
    """

    def __init__(self, completion, traceCallbackOrList=None):
        self._completion = completion
        # [:-1] drops this __init__ frame from the captured stack.
        self._trace = getTrace(traceCallbackOrList)[:-1]

    def getTrace(self):
        # [:-1] drops the getTrace frame itself.
        return getTrace(self._trace)[:-1]

    def _complete(self, *args, **kwargs):
        """Invoke the completion, attaching our trace to untraced errors."""
        try:
            self._completion(*args, **kwargs)
        except errors.Error as err:
            if err.getTrace() is not None:
                raise
            # NOTE(review): this calls the exception *instance* - it only
            # works if errors.Error defines __call__; otherwise
            # type(err)(trace=...) was probably intended.  Confirm.
            raise err(trace=self.getTrace())
class Callback(BaseCallback):
    """A directly invocable BaseCallback.

    The callback passes itself as the completion's first argument so the
    completion can reuse its stored trace.
    """

    def __call__(self, *args, **kwargs):
        self._complete(self, *args, **kwargs)
class MultiCompletion(BaseCallback):
    """Collects `number` slot values, then fires the completion exactly once."""

    def __init__(self, number, completion, traceCallback=None):
        BaseCallback.__init__(self, completion, traceCallback)
        self._slots = [None for _ in range(number)]
        self._puts = [False for _ in range(number)]

    def __call__(self, slot, value):
        """Store `value` in `slot`; complete when every slot has been set."""
        if self._puts[slot]:
            # Each slot may only be filled once.
            raise errors.MultiCompletionPutError(trace=getTrace(self))
        self._slots[slot] = value
        self._puts[slot] = True
        if all(self._puts):
            self._complete(*self._slots)
class ErrorCompletion(BaseCallback):
    """Completion that only fires for an expected set of exceptions."""

    def __init__(self, exceptions, completion, traceCallbackOrList=None):
        BaseCallback.__init__(self, completion, traceCallbackOrList)
        # Normalise to a list so __call__ can always iterate.
        if isinstance(exceptions, list):
            self._exceptions = exceptions
        else:
            self._exceptions = [exceptions]

    def __call__(self, err, *args, **kwargs):
        """Complete if `err` matches an expected exception; else re-raise it."""
        matched = any(isinstance(err, exception) or err is exception
                      for exception in self._exceptions)
        if matched:
            self._complete(*args, **kwargs)
            return
        raise err
class BasePrefsPage(wx.Panel):
    """Base class for preference pages backed by one ConfigParser section."""

    def __init__(self, parent, configParser, logger, sectionName, *args,
                 **kwargs):
        wx.Panel.__init__(self, parent)
        self._configParser = configParser
        self._logger = logger
        self._sectionName = sectionName
        # Subclass hook: establish default values before loading.
        self._setDefaults(*args, **kwargs)
        self._settings = {}
        try:
            self._configParser.add_section(self._sectionName)
        except configparser.DuplicateSectionError:
            # Section already exists from an earlier run - that is fine.
            pass
        self._loadSettings()

    def savePrefs(self):
        """Write every in-memory setting back into this page's section."""
        self._logger.debug("Saving \'" + self._sectionName + "\' preferences.")
        for (name, value) in list(self._settings.items()):
            self.setSetting(name, value)

    def setSetting(self, name, value):
        """Store a single setting (stringified) in this page's section."""
        self._configParser.set(self._sectionName, name, str(value))

    def _setDefaults(self, *args, **kwargs):  # Override me.
        pass

    def _loadSettings(self):  # Override me.
        pass
class BaseThread(threading.Thread, EventPoster):
    """Worker thread that drains a priority queue of Callback objects."""

    def __init__(self, parent, name, logger, errcallback, lock,
                 raiseEmpty=False, doneQueueLength=50):
        """Event-processing thread executing queued callbacks by priority.

        Arguments:
        - parent: the target for `wx.PostEvent()` calls made by this thread.
        - name: the thread name, used in log messages.
        - logger: the logger used by the EventPoster mixin.
        - errcallback: callable handed the EmptyQueueError once the queue
          has repeatedly run dry (may be None).
        - lock: the `threading.Lock()` shared by all threads with the same
          parent to prevent concurrency issues with `wx.PostEvent()`.
        - raiseEmpty: when True, start as though an empty-queue error was
          already raised, suppressing the first empty-queue callback.
        - doneQueueLength: how many completed events to retain for
          `dumpQueue()` diagnostics.
        """
        threading.Thread.__init__(self, name=name)
        EventPoster.__init__(self, parent, logger, lock)
        self._name = name
        self._errcallback = errcallback
        # Work items are (priority, sequence number, Callback); the
        # sequence number keeps FIFO order among equal priorities.
        self._queue = queue.PriorityQueue()
        # Recently executed items, kept for post-mortem dumps.
        self._doneQueue = CircularQueue(doneQueueLength)
        self._eventCount = 0
        self._abortCount = 0
        self._emptyCount = 0
        self._raisedEmpty = raiseEmpty
        self._interrupt = False
        self._runningLock = threading.Lock()

    def queue(self, thing, traceCallbackOrList=None, priority=2):
        """Schedule `thing` (a callable) to run on this thread."""
        thing = Callback(thing, traceCallbackOrList)
        self._eventCount += 1
        self._queue.put((priority, self._eventCount, thing))
        if self._raisedEmpty:
            # New work arrived after an empty-queue report: re-arm the
            # empty-queue detection.
            self._raisedEmpty = False
            self._queueEmptyQueueCallback()

    def start_(self, trace=None):
        """Start the thread, remembering the caller's trace for diagnostics."""
        self._trace = getTrace(trace)
        self.start()

    def run(self):
        """Main loop: pop and execute callbacks until aborted."""
        self.postDebugLog("Starting \'" + self._name + "\' thread.")
        with self._runningLock:
            self._run()
            while True:
                try:
                    self._pop()
                except errors.AbortThreadSignal:
                    # FIXME: Make more deterministic.
                    # Re-post the abort until seen ~20 times so queued
                    # work gets a bounded chance to drain first.
                    if self._abortCount > 20:
                        self._abort()
                        break
                    self.abort()
                    self._abortCount += 1
                except errors.EmptyQueueError as err:
                    # FIXME: Make more deterministic.
                    if self._emptyCount > 20:
                        if self._errcallback is not None:
                            self._raise(err, self._errcallback)
                        self._raisedEmpty = True
                    elif not self._raisedEmpty:
                        self._emptyCount += 1
                        self._queueEmptyQueueCallback()
            self.postDebugLog("\'" + self._name + "\' thread stopped.")

    def _run(self):
        # Subclass hook: runs once before the event loop starts.
        pass

    def _pop(self):
        """Block for the next item, record it, and execute its callback."""
        got = self._queue.get()
        self._doneQueue.append(got)
        self._queueCallback(got[2])
        # A successfully executed item resets the abort/empty counters.
        self._abortCount = 0
        self._emptyCount = 0

    def _abort(self):
        # Subclass hook: final cleanup when the thread stops.
        pass

    def _queueCallback(self, completion, *args, **kwargs):
        completion(*args, **kwargs)

    def _raise(self, err, errcompletion=None):
        """Hand `err` to `errcompletion`; post it as an event if that fails."""
        try:
            errcompletion(err)
        except:
            # NOTE(review): the bare except also covers errcompletion being
            # None (TypeError) - seemingly intentional, but confirm.
            self.postEvent(events.ExceptionEvent(err))

    def _queueEmptyQueueCallback(self):
        # Very low priority: only runs when nothing else is queued.
        self.queue(self._emptyQueueCallback, priority=999)

    def _emptyQueueCallback(self, thisCallback, *args, **kwargs):
        raise errors.EmptyQueueError(trace=thisCallback.getTrace())

    def setAbortInterrupt(self, interrupt):
        """If True, a later abort() jumps the queue instead of draining it."""
        self._interrupt = interrupt

    def abort(self, abortMainThread=True):
        """Request that this thread stop processing and exit."""
        self._abortMainThread = abortMainThread
        if self._interrupt:
            priority = 0
        else:
            priority = 1000
        self.queue(self._abortCallback, priority=priority)

    def _abortCallback(self, thisCallback, *args, **kwargs):
        raise errors.AbortThreadSignal()

    def dumpQueue(self, filename, extraLines=0):
        """Write executed and pending queue items to `filename`."""
        dump = copy.copy(self._queue.queue)
        file = open(filename, "w")
        for item, time in self._doneQueue:
            if item is not None:
                file.write(self._dumpQueueFormatter(item, extraLines, time))
        # Visual divider between executed and still-pending items.
        file.write(("-" * 100 + "\n\n\n") * 2)
        for item in dump:
            file.write(self._dumpQueueFormatter(item, extraLines))
        file.close()

    def _dumpQueueFormatter(self, item, extraLines=0, time=None):
        """Format one (priority, number, callback) item for dumpQueue()."""
        trace = "\tTraceback (most recent call last):\n" + "".join([
            line for line in traceback.format_list(
                getTrace(item[2])[:-(8 + extraLines)])])
        traceHash = str(hash(trace))
        if time is None:
            time = datetime.datetime.now()
        return (time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] +
                " Priority: " + str(item[0]) +
                " Event Number: " + str(item[1]) +
                " Object: " + str(item[2]) +
                " Trace Hash: " + traceHash + "\n\n" +
                trace + "\n\n\n")

    def getRunningLock(self):
        """Lock held for the entire lifetime of run()."""
        return self._runningLock
class CircularQueue:
    """Fixed-size FIFO of (item, timestamp) pairs; old entries fall off."""

    def __init__(self, size):
        # Pre-fill with empty pairs so iteration always yields `size` items.
        self._queue = [(None, None) for _ in range(size)]

    def append(self, item):
        """Add `item` (stamped with the current time), evicting the oldest."""
        del self._queue[0]
        self._queue.append((item, datetime.datetime.now()))

    def __getitem__(self, index):
        item, stamp = self._queue[index]
        return item, stamp
class EventLogger:
    """Keeps a rolling log of the most recent (eventString, event) pairs."""

    def __init__(self, length=100):
        self._queue = CircularQueue(length)
        self("---INIT---", None)

    def __call__(self, eventString, event):
        """Record one event under the label `eventString`."""
        self._queue.append((eventString, event))

    def done(self):
        """Record a terminal marker."""
        self("---DONE---", None)

    def dump(self, filename):
        """Write all recorded events to `filename`, oldest first."""
        out = open(filename, "w")
        for item, time in self._queue:
            if item is not None:
                out.write(self._dumpFormatter(item, time))
        out.close()

    def _dumpFormatter(self, item, time=None):
        if time is None:
            time = datetime.datetime.now()
        stamp = time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        return stamp + " Event: " + item[0] + "\n"
| bsd-3-clause |
ntt-sic/cinder | cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py | 3 | 3415 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String, Table
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
    """Drop the legacy sm_* (storage manager) tables."""
    meta = MetaData()
    meta.bind = migrate_engine
    # Reflect the three legacy tables from the live schema.
    sm_backend_config = Table('sm_backend_config', meta, autoload=True)
    sm_flavors = Table('sm_flavors', meta, autoload=True)
    sm_volume = Table('sm_volume', meta, autoload=True)
    # sm_volume references the other two, so it must be dropped first.
    for table in (sm_volume, sm_backend_config, sm_flavors):
        try:
            table.drop()
        except Exception:
            LOG.exception(_('Exception while dropping table %s.'),
                          repr(table))
            raise
def downgrade(migrate_engine):
    """Recreate the legacy sm_* (storage manager) tables.

    The tables are created in dependency order: sm_flavors first, then
    sm_backend_config (FK to sm_flavors), then sm_volume (FKs to volumes
    and sm_backend_config).
    """
    meta = MetaData()
    meta.bind = migrate_engine
    # Reflect `volumes` so sm_volume's foreign key can resolve.
    Table('volumes', meta, autoload=True)
    sm_backend_config = Table(
        'sm_backend_config', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('flavor_id', Integer, ForeignKey('sm_flavors.id'),
               nullable=False),
        Column('sr_uuid', String(length=255)),
        Column('sr_type', String(length=255)),
        Column('config_params', String(length=2047)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    sm_flavors = Table(
        'sm_flavors', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('label', String(length=255)),
        Column('description', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    sm_volume = Table(
        'sm_volume', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', String(length=36),
               ForeignKey('volumes.id'),
               primary_key=True,
               nullable=False),
        Column('backend_id', Integer, ForeignKey('sm_backend_config.id'),
               nullable=False),
        Column('vdi_uuid', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    # Creation order satisfies the foreign-key dependencies above.
    tables = [sm_flavors, sm_backend_config, sm_volume]
    for table in tables:
        try:
            table.create()
        except Exception:
            LOG.exception(_('Exception while creating table %s.'),
                          repr(table))
            raise
| apache-2.0 |
spiceqa/virt-test | qemu/tests/timedrift_with_migration.py | 3 | 4546 | import logging
from autotest.client.shared import error
from virttest import utils_test
def run_timedrift_with_migration(test, params, env):
    """
    Time drift test with migration:
    1) Log into a guest.
    2) Take a time reading from the guest and host.
    3) Migrate the guest.
    4) Take a second time reading.
    5) If the drift (in seconds) is higher than a user specified value, fail.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    # Optionally adjust guest kernel boot options before the test.
    boot_option_added = params.get("boot_option_added")
    boot_option_removed = params.get("boot_option_removed")
    if boot_option_added or boot_option_removed:
        utils_test.update_boot_option(vm,
                                      args_removed=boot_option_removed,
                                      args_added=boot_option_added)
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    # Collect test parameters:
    # Command to run to get the current time
    time_command = params["time_command"]
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params["time_filter_re"]
    # Time format for time.strptime()
    time_format = params["time_format"]
    # Maximum acceptable drift over the whole test / over one iteration.
    drift_threshold = float(params.get("drift_threshold", "10"))
    drift_threshold_single = float(params.get("drift_threshold_single", "3"))
    migration_iterations = int(params.get("migration_iterations", 1))
    try:
        # Get initial time
        # (ht stands for host time, gt stands for guest time)
        (ht0, gt0) = utils_test.get_time(session, time_command,
                                         time_filter_re, time_format)
        # Migrate
        for i in range(migration_iterations):
            # Get time before current iteration
            (ht0_, gt0_) = utils_test.get_time(session, time_command,
                                               time_filter_re, time_format)
            # The session does not survive migration; close and re-login.
            session.close()
            # Run current iteration
            logging.info("Migrating: iteration %d of %d...",
                         (i + 1), migration_iterations)
            vm.migrate()
            # Log in
            logging.info("Logging in after migration...")
            session = vm.wait_for_login(timeout=30)
            logging.info("Logged in after migration")
            # Get time after current iteration
            (ht1_, gt1_) = utils_test.get_time(session, time_command,
                                               time_filter_re, time_format)
            # Report iteration results
            host_delta = ht1_ - ht0_
            guest_delta = gt1_ - gt0_
            # Drift is how far the guest clock diverged from the host clock.
            drift = abs(host_delta - guest_delta)
            logging.info("Host duration (iteration %d): %.2f",
                         (i + 1), host_delta)
            logging.info("Guest duration (iteration %d): %.2f",
                         (i + 1), guest_delta)
            logging.info("Drift at iteration %d: %.2f seconds",
                         (i + 1), drift)
            # Fail if necessary
            if drift > drift_threshold_single:
                raise error.TestFail("Time drift too large at iteration %d: "
                                     "%.2f seconds" % (i + 1, drift))
        # Get final time
        (ht1, gt1) = utils_test.get_time(session, time_command,
                                         time_filter_re, time_format)
    finally:
        if session:
            session.close()
        # remove flags add for this test.
        if boot_option_added or boot_option_removed:
            utils_test.update_boot_option(vm,
                                          args_removed=boot_option_added,
                                          args_added=boot_option_removed)
    # Report results
    host_delta = ht1 - ht0
    guest_delta = gt1 - gt0
    drift = abs(host_delta - guest_delta)
    logging.info("Host duration (%d migrations): %.2f",
                 migration_iterations, host_delta)
    logging.info("Guest duration (%d migrations): %.2f",
                 migration_iterations, guest_delta)
    logging.info("Drift after %d migrations: %.2f seconds",
                 migration_iterations, drift)
    # Fail if necessary
    if drift > drift_threshold:
        raise error.TestFail("Time drift too large after %d migrations: "
                             "%.2f seconds" % (migration_iterations, drift))
| gpl-2.0 |
cernops/keystone | keystone/common/router.py | 5 | 3280 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import json_home
from keystone.common import wsgi
class Router(wsgi.ComposableRouter):
    """Generic CRUD router for a keystone collection/entity resource pair."""

    def __init__(self, controller, collection_key, key,
                 resource_descriptions=None,
                 is_entity_implemented=True,
                 method_template=None):
        self.controller = controller
        self.key = key
        self.collection_key = collection_key
        self._resource_descriptions = resource_descriptions
        self._is_entity_implemented = is_entity_implemented
        self.method_template = method_template or '%s'

    def add_routes(self, mapper):
        """Register the standard CRUD routes and JSON-home descriptions."""
        collection_path = '/%(collection_key)s' % {
            'collection_key': self.collection_key}
        entity_path = '/%(collection_key)s/{%(key)s_id}' % {
            'collection_key': self.collection_key,
            'key': self.key}
        # (path, action name, allowed HTTP methods) for each standard route.
        routes = [
            (collection_path, 'create_%s' % self.key, ['POST']),
            (collection_path, 'list_%s' % self.collection_key,
             ['GET', 'HEAD']),
            (entity_path, 'get_%s' % self.key, ['GET', 'HEAD']),
            (entity_path, 'update_%s' % self.key, ['PATCH']),
            (entity_path, 'delete_%s' % self.key, ['DELETE']),
        ]
        for path, action, methods in routes:
            mapper.connect(
                path,
                controller=self.controller,
                action=self.method_template % action,
                conditions=dict(method=methods))
        # Add the collection resource and entity resource to the resource
        # descriptions.
        collection_rel = json_home.build_v3_resource_relation(
            self.collection_key)
        rel_data = {'href': collection_path, }
        self._resource_descriptions.append((collection_rel, rel_data))
        if self._is_entity_implemented:
            # Entity resources are templated on the entity id parameter.
            entity_rel = json_home.build_v3_resource_relation(self.key)
            id_str = '%s_id' % self.key
            id_param_rel = json_home.build_v3_parameter_relation(id_str)
            entity_rel_data = {
                'href-template': entity_path,
                'href-vars': {
                    id_str: id_param_rel,
                },
            }
            self._resource_descriptions.append((entity_rel, entity_rel_data))
| apache-2.0 |
ibamacsr/painelmma_api | painel_api/settings/production.py | 1 | 2521 | # -*- coding: utf-8 -*-
"""Production settings and globals."""
from __future__ import absolute_import
from os import environ
from .base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
    """Return the value of the environment variable `setting`.

    Raises ImproperlyConfigured (rather than KeyError) when the variable is
    unset so Django reports a clear configuration error at startup.
    """
    if setting in environ:
        return environ[setting]
    error_msg = "Set the %s env variable" % setting
    raise ImproperlyConfigured(error_msg)
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
# NOTE(review): with DEBUG off, an empty ALLOWED_HOSTS makes Django reject
# every request with a 400 - populate this before deploying.
ALLOWED_HOSTS = []
########## END HOST CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# Intentionally empty here; expected to be provided by the deployment.
DATABASES = {}
########## END DATABASE CONFIGURATION
########## DATABASE ROUTERS CONFIGURATION
#Uncomment this to add routers in production system
#DATABASE_ROUTERS = ['restApp.dbrouters.SiscomRouter']
########## END DATABASE ROUTERS CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
# Intentionally empty here; expected to be provided by the deployment.
CACHES = {}
########## END CACHE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Fails fast at import time if SECRET_KEY is not in the environment.
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION
########## SCHEMA CONFIGURATIONS
SCHEMA = ''
########## SCHEMA END CONFIGURATIONS | mit |
dankilman/claw | claw/tests/commands/test_generate.py | 1 | 9306 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import sh
import yaml
from claw import tests
class GenerateTest(tests.BaseTestWithInit):
    """Tests for `claw generate`: inputs/blueprint overrides, reset, errors.

    Most cases delegate to `_test`, which builds a suites.yaml + manager
    blueprint on disk, runs `claw generate`, and asserts on the generated
    configuration directory.
    """

    def setUp(self):
        super(GenerateTest, self).setUp()
        # Baseline inputs and template variables shared by the test cases.
        self.inputs = {'some': 'input'}
        self.variables = {'a': 'AAA', 'b': 'BBB'}

    def test_basic(self):
        self._test()

    def test_inputs(self):
        self._test(inputs=self.inputs)

    def test_inputs_override_in_handler_configuration_with_inputs(self):
        self._test_inputs_override_in_handler_configuration(inputs=self.inputs)

    def test_inputs_override_in_handler_configuration_no_inputs(self):
        self._test_inputs_override_in_handler_configuration()

    def _test_inputs_override_in_handler_configuration(self, inputs=None):
        # '{{a}}' should be rendered from self.variables into 'AAA'.
        inputs_override = {'override': 'inputs {{a}}'}
        processed_inputs_override = {'override': 'inputs AAA'}
        self._test(inputs=inputs,
                   inputs_override=inputs_override,
                   processed_inputs_override=processed_inputs_override)

    def test_manager_blueprint_override_in_handler_configuration(self):
        blueprint_override = {'override': 'blueprint {{a}}'}
        processed_blueprint_override = {'override': 'blueprint AAA'}
        self._test(inputs=self.inputs,
                   blueprint_override=blueprint_override,
                   processed_blueprint_override=processed_blueprint_override)

    def test_inputs_override_in_command_line_no_handler_inputs_override(self):
        self._test_inputs_override_in_command_line()

    def test_inputs_override_in_command_line_with_handler_inputs_override(self): # noqa
        inputs_override = {'override': 'inputs {{a}}'}
        processed_inputs_override = {'override': 'inputs AAA'}
        self._test_inputs_override_in_command_line(
            handler_inputs_override=inputs_override,
            processed_handler_inputs_override=processed_inputs_override)

    def _test_inputs_override_in_command_line(
            self,
            handler_inputs_override=None,
            processed_handler_inputs_override=None): # noqa
        # Two named override templates passed via repeated -i flags.
        cmd_inputs_override = {
            'from_cmd1': {'cmd_override': 'cmd_inputs {{b}}'},
            'from_cmd2': {'cmd_override2': 'cmd_inputs2 {{b}}'}
        }
        processed_inputs_override = {'cmd_override': 'cmd_inputs BBB',
                                     'cmd_override2': 'cmd_inputs2 BBB'}
        if processed_handler_inputs_override:
            processed_inputs_override.update(processed_handler_inputs_override)
        self._test(inputs_override=handler_inputs_override,
                   cmd_inputs_override=cmd_inputs_override,
                   processed_inputs_override=processed_inputs_override)

    def test_manager_blueprint_override_in_command_line_no_handler_blueprint_override(self): # noqa
        self._test_manager_blueprint_override_in_command_line()

    def test_manager_blueprint_override_in_command_line_with_handler_blueprint_override(self): # noqa
        blueprint_override = {'override': 'blueprint {{a}}'}
        processed_blueprint_override = {'override': 'blueprint AAA'}
        self._test_manager_blueprint_override_in_command_line(
            handler_blueprint_override=blueprint_override,
            processed_handler_blueprint_override=processed_blueprint_override)

    def _test_manager_blueprint_override_in_command_line(
            self,
            handler_blueprint_override=None,
            processed_handler_blueprint_override=None): # noqa
        # Two named override templates passed via repeated -b flags.
        cmd_blueprint_override = {
            'from_cmd1': {'cmd_override': 'cmd_blueprint {{b}}'},
            'from_cmd2': {'cmd_override2': 'cmd_blueprint2 {{b}}'}
        }
        processed_blueprint_override = {'cmd_override': 'cmd_blueprint BBB',
                                        'cmd_override2': 'cmd_blueprint2 BBB'}
        if processed_handler_blueprint_override:
            processed_blueprint_override.update(
                processed_handler_blueprint_override)
        self._test(blueprint_override=handler_blueprint_override,
                   cmd_blueprint_override=cmd_blueprint_override,
                   processed_blueprint_override=processed_blueprint_override)

    def test_no_such_configuration(self):
        with self.assertRaises(sh.ErrorReturnCode) as c:
            self.claw.generate('no_such_configuration')
        self.assertIn('No such configuration', c.exception.stderr)

    def test_existing_configuration_no_reset(self):
        # A second generate without --reset must be rejected.
        self._test()
        with self.assertRaises(sh.ErrorReturnCode) as c:
            self._test()
        self.assertIn('Configuration already initialized', c.exception.stderr)

    def test_existing_configuration_reset(self):
        self._test()
        self._test(reset=True)

    def test_existing_reset_in_conf_dir(self):
        # --reset should also work when run from inside the config dir.
        config_dir = self._test()
        with config_dir:
            self._test(reset=True)

    def test_existing_current_configuration(self):
        self._test()
        self._test(configuration='some_other_conf')

    def _test(self,
              inputs=None,
              inputs_override=None,
              cmd_inputs_override=None,
              processed_inputs_override=None,
              blueprint_override=None,
              cmd_blueprint_override=None,
              processed_blueprint_override=None,
              reset=False,
              configuration=None):
        """Run `claw generate` for one scenario and verify its outputs.

        Builds the suites.yaml/handler-configuration/blueprint fixtures on
        disk, invokes the command, then asserts the generated inputs,
        blueprint and handler configuration match the expected merges.
        Returns the generated configuration directory.
        """
        configuration = configuration or 'conf1'
        # --- fixture layout on disk ---
        blueprint_dir = self.workdir / 'blueprint'
        blueprint_dir.mkdir_p()
        inputs_path = blueprint_dir / 'inputs.yaml'
        blueprint_path = blueprint_dir / 'some-manager-blueprint.yaml'
        config_dir = self.workdir / 'configurations' / configuration
        new_inputs_path = config_dir / 'inputs.yaml'
        new_blueprint_path = (config_dir / 'manager-blueprint' /
                              'manager-blueprint.yaml')
        handler_configuration_path = config_dir / 'handler-configuration.yaml'
        blueprint = {'some_other': 'manager_blueprint'}
        if inputs:
            inputs_path.write_text(yaml.safe_dump(inputs))
        blueprint_path.write_text(yaml.safe_dump(blueprint))
        handler_configuration = {
            'handler': 'stub_handler',
            'manager_blueprint': str(blueprint_path),
        }
        if inputs:
            handler_configuration['inputs'] = str(inputs_path)
        if inputs_override:
            handler_configuration['inputs_override'] = inputs_override
        if blueprint_override:
            handler_configuration[
                'manager_blueprint_override'] = blueprint_override
        # --- assemble command line and suites.yaml ---
        command_args = [configuration]
        suites_yaml = {
            'variables': self.variables,
            'handler_configurations': {
                configuration: handler_configuration
            }
        }
        if cmd_inputs_override:
            suites_yaml['inputs_override_templates'] = cmd_inputs_override
            for name in cmd_inputs_override:
                command_args += ['-i', name]
        if cmd_blueprint_override:
            suites_yaml['manager_blueprint_override_templates'] = (
                cmd_blueprint_override)
            for name in cmd_blueprint_override:
                command_args += ['-b', name]
        self.settings.user_suites_yaml.write_text(yaml.safe_dump(suites_yaml))
        self.claw.generate(*command_args, reset=reset)
        # --- assertions on generated artifacts ---
        expected_inputs = (inputs or {}).copy()
        expected_inputs.update((processed_inputs_override or {}))
        self.assertEqual(expected_inputs,
                         yaml.safe_load(new_inputs_path.text()))
        expected_blueprint = blueprint.copy()
        expected_blueprint.update((processed_blueprint_override or {}))
        self.assertEqual(expected_blueprint,
                         yaml.safe_load(new_blueprint_path.text()))
        expected_handler_configuration = handler_configuration.copy()
        expected_handler_configuration.pop('inputs_override', {})
        expected_handler_configuration.pop('manager_blueprint_override', {})
        expected_handler_configuration.update({
            'install_manager_blueprint_dependencies': False,
            'manager_blueprint': new_blueprint_path,
            'inputs': new_inputs_path
        })
        self.assertEqual(expected_handler_configuration,
                         yaml.safe_load(handler_configuration_path.text()))
        # The source suites.yaml itself must be left unmodified.
        self.assertEqual(suites_yaml,
                         yaml.safe_load(self.settings.user_suites_yaml.text()))
        # '_' is a symlink to the currently-selected configuration.
        self.assertEqual((self.workdir / 'configurations' / '_').readlink(),
                         configuration)
        return config_dir
| apache-2.0 |
mikewiebe-ansible/ansible | lib/ansible/modules/cloud/rackspace/rax_mon_check.py | 77 | 10762 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata consumed by ansible-doc and the plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_check
short_description: Create or delete a Rackspace Cloud Monitoring check for an
existing entity.
description:
- Create or delete a Rackspace Cloud Monitoring check associated with an
existing rax_mon_entity. A check is a specific test or measurement that is
performed, possibly from different monitoring zones, on the systems you
monitor. Rackspace monitoring module flow | rax_mon_entity ->
*rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that a check with this C(label) exists or does not exist.
choices: ["present", "absent"]
entity_id:
description:
- ID of the rax_mon_entity to target with this check.
required: true
label:
description:
- Defines a label for this check, between 1 and 64 characters long.
required: true
check_type:
description:
- The type of check to create. C(remote.) checks may be created on any
rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
that have a non-null C(agent_id).
choices:
- remote.dns
- remote.ftp-banner
- remote.http
- remote.imap-banner
- remote.mssql-banner
- remote.mysql-banner
- remote.ping
- remote.pop3-banner
- remote.postgresql-banner
- remote.smtp-banner
- remote.smtp
- remote.ssh
- remote.tcp
- remote.telnet-banner
- agent.filesystem
- agent.memory
- agent.load_average
- agent.cpu
- agent.disk
- agent.network
- agent.plugin
required: true
monitoring_zones_poll:
description:
- Comma-separated list of the names of the monitoring zones the check should
run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
target_hostname:
description:
- One of `target_hostname` and `target_alias` is required for remote.* checks,
but prohibited for agent.* checks. The hostname this check should target.
Must be a valid IPv4, IPv6, or FQDN.
target_alias:
description:
- One of `target_alias` and `target_hostname` is required for remote.* checks,
but prohibited for agent.* checks. Use the corresponding key in the entity's
`ip_addresses` hash to resolve an IP address to target.
details:
description:
- Additional details specific to the check type. Must be a hash of strings
between 1 and 255 characters long, or an array or object containing 0 to
256 items.
disabled:
description:
- If "yes", ensure the check is created, but don't actually use it yet.
type: bool
metadata:
description:
- Hash of arbitrary key-value pairs to accompany this check if it fires.
Keys and values must be strings between 1 and 255 characters long.
period:
description:
- The number of seconds between each time the check is performed. Must be
greater than the minimum period set on your account.
timeout:
description:
- The number of seconds this check will wait when attempting to collect
results. Must be less than the period.
author: Ash Wilson (@smashwilson)
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a monitoring check
gather_facts: False
hosts: local
connection: local
tasks:
- name: Associate a check with an existing entity.
rax_mon_check:
credentials: ~/.rax_pub
state: present
entity_id: "{{ the_entity['entity']['id'] }}"
label: the_check
check_type: remote.ping
monitoring_zones_poll: mziad,mzord,mzdfw
details:
count: 10
meta:
hurf: durf
register: the_check
'''
# pyrax is an optional dependency; record its availability so the module
# can report a clean failure at runtime instead of crashing at import time.
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def cloud_check(module, state, entity_id, label, check_type,
                monitoring_zones_poll, target_hostname, target_alias, details,
                disabled, metadata, period, timeout):
    """Ensure a Rackspace Cloud Monitoring check is present or absent.

    The check is looked up by ``label`` on the entity named by ``entity_id``.
    When present, an existing check is updated in place if only updatable
    attributes differ, but is deleted and recreated whenever any *specified*
    ``details`` key is missing or different (details cannot be updated in
    place through the API).  Always exits the module via exit_json/fail_json.
    """
    # Coerce attributes.
    if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
        # A single zone may arrive as a comma string/scalar; the API wants a list.
        monitoring_zones_poll = [monitoring_zones_poll]

    if period:
        period = int(period)

    if timeout:
        timeout = int(timeout)

    changed = False
    check = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    entity = cm.get_entity(entity_id)
    if not entity:
        module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
                             ' a valid entity id.' % entity_id)

    # Checks are matched by label; duplicates are an error below.
    existing = [e for e in entity.list_checks() if e.label == label]

    if existing:
        check = existing[0]

    if state == 'present':
        if len(existing) > 1:
            module.fail_json(msg='%s existing checks have a label of %s.' %
                                 (len(existing), label))

        should_delete = False
        should_create = False
        should_update = False

        if check:
            # Details may include keys set to default values that are not
            # included in the initial creation.
            #
            # Only force a recreation of the check if one of the *specified*
            # keys is missing or has a different value.
            if details:
                for (key, value) in details.items():
                    if key not in check.details:
                        should_delete = should_create = True
                    elif value != check.details[key]:
                        should_delete = should_create = True

            # Everything else can be changed with an in-place update.
            should_update = label != check.label or \
                (target_hostname and target_hostname != check.target_hostname) or \
                (target_alias and target_alias != check.target_alias) or \
                (disabled != check.disabled) or \
                (metadata and metadata != check.metadata) or \
                (period and period != check.period) or \
                (timeout and timeout != check.timeout) or \
                (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)

            if should_update and not should_delete:
                check.update(label=label,
                             disabled=disabled,
                             metadata=metadata,
                             monitoring_zones_poll=monitoring_zones_poll,
                             timeout=timeout,
                             period=period,
                             target_alias=target_alias,
                             target_hostname=target_hostname)
                changed = True
        else:
            # The check doesn't exist yet.
            should_create = True

        if should_delete:
            check.delete()

        if should_create:
            check = cm.create_check(entity,
                                    label=label,
                                    check_type=check_type,
                                    target_hostname=target_hostname,
                                    target_alias=target_alias,
                                    monitoring_zones_poll=monitoring_zones_poll,
                                    details=details,
                                    disabled=disabled,
                                    metadata=metadata,
                                    period=period,
                                    timeout=timeout)
            changed = True
    elif state == 'absent':
        if check:
            check.delete()
            changed = True
    else:
        module.fail_json(msg='state must be either present or absent.')

    if check:
        # Report the resulting check's attributes back to the playbook.
        check_dict = {
            "id": check.id,
            "label": check.label,
            "type": check.type,
            "target_hostname": check.target_hostname,
            "target_alias": check.target_alias,
            "monitoring_zones_poll": check.monitoring_zones_poll,
            "details": check.details,
            "disabled": check.disabled,
            "metadata": check.metadata,
            "period": check.period,
            "timeout": check.timeout
        }
        module.exit_json(changed=changed, check=check_dict)
    else:
        module.exit_json(changed=changed)
def main():
    """Module entry point: declare the argument spec, validate the
    environment, and hand the parameters off to cloud_check()."""
    module_args = dict(
        entity_id=dict(required=True),
        label=dict(required=True),
        check_type=dict(required=True),
        monitoring_zones_poll=dict(),
        target_hostname=dict(),
        target_alias=dict(),
        details=dict(type='dict', default={}),
        disabled=dict(type='bool', default=False),
        metadata=dict(type='dict', default={}),
        period=dict(type='int'),
        timeout=dict(type='int'),
        state=dict(default='present', choices=['present', 'absent']),
    )

    argument_spec = rax_argument_spec()
    argument_spec.update(module_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params
    setup_rax_module(module, pyrax)

    cloud_check(module,
                params.get('state'),
                params.get('entity_id'),
                params.get('label'),
                params.get('check_type'),
                params.get('monitoring_zones_poll'),
                params.get('target_hostname'),
                params.get('target_alias'),
                params.get('details'),
                module.boolean(params.get('disabled')),
                params.get('metadata'),
                params.get('period'),
                params.get('timeout'))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
grandtiger/profitpy | profit/lib/widgets/callableselectwidget.py | 18 | 10034 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
## TODO: match warning to location label
## TODO: fix enable/disable on change
## TODO: streamline
## TODO: provide valid marker on source edit emit
import logging
import sys
from os.path import abspath, exists
from string import Template
from tempfile import NamedTemporaryFile
from PyQt4.QtCore import QProcess, QVariant, pyqtSignature
from PyQt4.QtGui import QFileDialog, QFrame, QInputDialog, QMessageBox
from profit.lib import Settings, Signals
from profit.lib.widgets.syspathdialog import SysPathDialog
from profit.lib.widgets.ui_callableselect import Ui_CallableSelectWidget
class CallableSelectWidget(QFrame, Ui_CallableSelectWidget):
    """ CallableSelectWidget -> compound widget type for specifying a callable item

    The user selects one of several callable "types" (external command,
    python object, factory, source expression, file), fills in a location
    and -- for source types -- may edit the source directly.  State changes
    are re-broadcast via the ``Signals.modified`` signal.
    """

    ## Six types are supported in the class.  These can be selectively
    ## disabled by the client.  NB: this list must match the items
    ## defined in the ui file.
    unsetType, externalType, objectType, factoryType, sourceType, fileType = range(6)

    ## The types are mapped to names so we can search for them by
    ## value.
    callTypeMap = {
        unsetType:'',
        externalType:'external',
        objectType:'object',
        factoryType:'factory',
        sourceType:'source',
        fileType:'file',
    }

    ## Each callable type suggests a suitable location label.  Notice
    ## we're mapping from the name, not the index.
    typeLocationLabels = {
        '':'',
        'external':'Command:',
        'object':'Value:',
        'factory':'Value:',
        'source':'Expression:',
        'file':'File:',
    }

    ## This is a list of names for convenience.
    pythonTypes = [
        callTypeMap[objectType],
        callTypeMap[factoryType],
        callTypeMap[sourceType],
    ]

    ## Another list of names for convenience.
    fsTypes = [
        callTypeMap[externalType],
        callTypeMap[fileType],
    ]

    ## Optional client-supplied callables for loading/saving edited source;
    ## None disables the corresponding button behavior.
    revertSource = saveSource = None
    requireExpression = True

    def __init__(self, parent=None):
        """ Initializer.
        """
        QFrame.__init__(self, parent)
        self.setupUi(self)
        self.setupCallableTypes()

    def setupCallableTypes(self):
        """ Routine for configuration of the callableTypes combo.
        """
        for key, value in self.callTypeMap.items():
            self.callableType.setItemData(key, QVariant(value))
        self.connect(self.callableType, Signals.currentIndexChanged,
                     self, Signals.currentIndexChanged)

    def setAttributes(self, **kwds):
        """ Apply client-provided attributes, falling back to defaults.
        """
        items = [
            ('callType', self.unsetType),
            ('locationText', ''),
            ('sourceText', ''),
            ('revertSource', None),
            ('saveSource', None),
            ('requireExpression', True),
        ]
        for name, default in items:
            setattr(self, name, kwds.get(name, default))

    def basicSetup(self, **kwds):
        """ Client configuration method.  Call this to configure an
        instance after initalization.
        """
        for key, value in self.callTypeMap.items():
            if kwds.get('disable%sType' % value.title(), False):
                self.callableType.removeItem(
                    self.callableType.findData(QVariant(value)))
                self.stackedWidget.removeWidget(
                    self.stackedWidget.widget(key))
        self.setAttributes(**kwds)
        self.saveButton.setEnabled(False)
        self.revertButton.setEnabled(False)

    def renameCallableTypeItem(self, old, new):
        """ Rename an callable type item without losing its value.
        """
        index = self.callableType.findText(old)
        if index > -1:
            self.callableType.setItemText(index, new)

    ## property for getting and setting the call type by index.  note
    ## that there isn't any conversion happening or necessary.

    def getCallTypeIndex(self):
        return self.callableType.currentIndex()

    def setCallTypeIndex(self, index):
        ## BUG FIX: was "setCurretIndex" (typo), which raised
        ## AttributeError whenever this property setter was used.
        self.callableType.setCurrentIndex(index)

    callTypeIndex = property(getCallTypeIndex, setCallTypeIndex)

    ## property for getting the call type name as a string.  note that
    ## the getter returns a python string, not a QString.

    def getCallType(self):
        wid = self.callableType
        return str(wid.itemData(wid.currentIndex()).toString())

    def setCallType(self, value):
        wid = self.callableType
        wid.setCurrentIndex(wid.findData(QVariant(value)))

    callType = property(getCallType, setCallType)

    ## property for getting and setting the location text.  again note
    ## the getter returns a python string.

    def getLocationText(self):
        return str(self.callableLocation.text())

    def setLocationText(self, text):
        self.callableLocation.setText(text)

    locationText = property(getLocationText, setLocationText)

    ## property for getting and setting the source code text.  note
    ## that the getter fixes up the string per python "compile"
    ## function requirements (unix newlines, trailing newline).

    def getSourceEditorText(self):
        source = self.callableSourceEditor.text()
        source = str(source).replace('\r\n', '\n')
        if not source.endswith('\n'):
            source += '\n'
        return source

    def setSourceEditorText(self, text):
        self.callableSourceEditor.setText(text)

    sourceText = property(getSourceEditorText, setSourceEditorText)

    def warn(self, text, widget=None):
        """ Display a warning message in the given (or default) label. """
        format = '<b>Warning:</b> %s.' if text else '%s'
        if widget is None:
            widget = self.locationWarning
        widget.setText(format % text)

    def on_textEdit_textChanged(self):
        ## NOTE(review): the slot name targets an object called "textEdit";
        ## confirm against the ui file that auto-connection actually binds
        ## this to the source editor.
        try:
            self.callableCode()
        except (SyntaxError, ):
            msg = 'invalid syntax'
        else:
            msg = ''
        self.warn(msg, self.sourceWarning)
        self.saveButton.setEnabled(True)
        self.revertButton.setEnabled(True)

    def emitChanged(self):
        """ Broadcast the current widget state via Signals.modified. """
        idx, typ, loc, txt = (
            self.callTypeIndex,
            self.callType,
            self.locationText,
            self.sourceText,
        )
        ## Only source-type callables carry their source text in the signal.
        ## BUG FIX: this previously compared the integer index against the
        ## type *name* ('source'), which was always unequal and therefore
        ## always cleared the text.  Compare type names instead; they remain
        ## valid even after combo items are removed by basicSetup.
        if typ != self.callTypeMap[self.sourceType]:
            txt = ''
        val = 'unknown'
        self.emit(Signals.modified, idx, typ, loc, txt, val)

    @pyqtSignature('int')
    def on_callableType_currentIndexChanged(self, index):
        ## enable or disable the location-related widgets; there isn't
        ## a group because of the grid layout.
        for widget in self.locationWidgets():
            widget.setDisabled(index == self.unsetType)
        ## set the location label to something relevant to the
        ## selected type.
        v = str(self.callableType.itemData(index).toString())
        self.locationLabel.setText(self.typeLocationLabels[v])
        if index == self.externalType:
            self.checkPathExists()
        else:
            isSource = (self.callType == self.callTypeMap[self.sourceType])
            self.callableLocationSelect.setDisabled(isSource)
            if isSource:
                self.callableLocation.setDisabled(not self.requireExpression)
        self.emitChanged()

    def checkPathExists(self):
        """ Warn when the current location does not name an existing path. """
        if not exists(abspath(self.locationText)):
            msg = 'location does not exist'
        else:
            msg = ''
        self.warn(msg)

    def callableCode(self):
        """ Compile the current source text; raises SyntaxError if invalid. """
        try:
            src = self.sourceText
        except (AttributeError, ):
            src = ''
        return compile(src, '<string>', 'exec')

    def on_callableLocation_textChanged(self, text):
        self.warn('')
        if self.callType == self.callTypeMap[self.sourceType]:
            try:
                code = self.callableCode()
            except (SyntaxError, ):
                msg = 'invalid syntax'
            else:
                text = str(text)
                msg = ''
                if (text and (text not in code.co_names)) and self.requireExpression:
                    msg = 'expression not found in source'
            self.warn(msg)
        elif self.callType in self.fsTypes:
            self.checkPathExists()
        self.emitChanged()

    @pyqtSignature('')
    def on_callableLocationSelect_clicked(self):
        name = None
        calltype = self.callType
        if calltype in self.fsTypes:
            ## BUG FIX: was "self.callTypeText", an attribute that does not
            ## exist anywhere in this class (AttributeError at runtime).
            filename = QFileDialog.getOpenFileName(
                self, 'Select %s' % self.callType, '',
                'Executable file (*.*)')
            if filename:
                name = filename
        elif calltype in self.pythonTypes:
            dlg = SysPathDialog(self)
            if dlg.exec_() == dlg.Accepted:
                name = dlg.selectedEdit.text()
        elif not calltype:
            QMessageBox.warning(
                self, 'Invalid Type', 'Select a callable type first.')
        else:
            pass # unknownType item (0) selected
        if name is not None:
            self.locationText = name
            self.emitChanged()

    @pyqtSignature('')
    def on_revertButton_clicked(self):
        if self.revertSource:
            self.sourceText = self.revertSource()
            self.saveButton.setEnabled(False)
            self.revertButton.setEnabled(False)

    @pyqtSignature('')
    def on_saveButton_clicked(self):
        if self.saveSource:
            self.saveSource(self.sourceText)
            self.saveButton.setEnabled(False)
            self.revertButton.setEnabled(False)

    def locationWidgets(self):
        """ The widgets that participate in location enable/disable. """
        return [self.locationLabel, self.locationWarning,
                self.callableLocationSelect, self.callableLocation, ]
if __name__ == '__main__':
    # Ad-hoc manual test harness: show the widget on its own.
    from PyQt4.QtGui import QApplication
    application = QApplication(sys.argv)
    widget = CallableSelectWidget(parent=None)
    widget.show()
    sys.exit(application.exec_())
| gpl-2.0 |
openfun/edx-platform | lms/djangoapps/notification_prefs/views.py | 163 | 7387 | from base64 import urlsafe_b64encode, urlsafe_b64decode
from hashlib import sha256
import json
from Crypto.Cipher import AES
from Crypto import Random
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.views.decorators.http import require_GET, require_POST
from edxmako.shortcuts import render_to_response
from notification_prefs import NOTIFICATION_PREF_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference
class UsernameDecryptionException(Exception):
    """Raised when an unsubscribe token cannot be decrypted to a username."""
class UsernameCipher(object):
    """Reversible transformation between a username and an opaque token.

    The token backs one-click unsubscribe links that work without logging
    in, so it must be URL-embeddable and must not be computable by users
    who can read this source (otherwise they could unsubscribe each other).

    Encoding steps (decoding performs the inverse):

    1. PKCS#7-pad the UTF-8 encoding of the username to the AES block size
    2. generate a random initialization vector, one AES block long
    3. encrypt with AES-256 in CBC mode, keyed by sha256(settings.SECRET_KEY)
    4. prepend the IV so the decrypting side can initialize its cipher
    5. base64url encode the result
    """

    @staticmethod
    def _get_aes_cipher(initialization_vector):
        key_hash = sha256()
        key_hash.update(settings.SECRET_KEY)
        return AES.new(key_hash.digest(), AES.MODE_CBC, initialization_vector)

    @staticmethod
    def _add_padding(input_str):
        """Return `input_str` with PKCS#7 padding added to match AES block length"""
        pad_len = AES.block_size - len(input_str) % AES.block_size
        return input_str + chr(pad_len) * pad_len

    @staticmethod
    def _remove_padding(input_str):
        """Return `input_str` with PKCS#7 padding trimmed to match AES block length"""
        pad_len = ord(input_str[-1])
        if not (1 <= pad_len <= AES.block_size and pad_len < len(input_str)):
            raise UsernameDecryptionException("padding")
        return input_str[:-pad_len]

    @staticmethod
    def encrypt(username):
        iv = Random.new().read(AES.block_size)
        cipher = UsernameCipher._get_aes_cipher(iv)
        padded = UsernameCipher._add_padding(username.encode("utf-8"))
        return urlsafe_b64encode(iv + cipher.encrypt(padded))

    @staticmethod
    def decrypt(token):
        try:
            decoded = urlsafe_b64decode(token)
        except TypeError:
            raise UsernameDecryptionException("base64url")

        if len(decoded) < AES.block_size:
            raise UsernameDecryptionException("initialization_vector")

        iv = decoded[:AES.block_size]
        ciphertext = decoded[AES.block_size:]
        cipher = UsernameCipher._get_aes_cipher(iv)

        try:
            plaintext = cipher.decrypt(ciphertext)
        except ValueError:
            raise UsernameDecryptionException("aes")

        return UsernameCipher._remove_padding(plaintext)
def enable_notifications(user):
    """Turn on notification (daily forum digest) emails for ``user``.

    Idempotent: an existing preference is left untouched.
    """
    # Calling UserPreference directly because this method is called from a
    # couple of places, and it is not clear that user is always the user
    # initiating the request.
    token = UsernameCipher.encrypt(user.username)
    UserPreference.objects.get_or_create(
        user=user,
        key=NOTIFICATION_PREF_KEY,
        defaults={"value": token},
    )
@require_POST
def ajax_enable(request):
    """Enable notifications for the authenticated user.

    Invoked via AJAX POST; responds 204 (no content) on success.  If the
    user already has notifications enabled this is a no-op; otherwise a
    preference holding the unsubscribe token (an encryption of the
    username) is created.
    """
    user = request.user
    if not user.is_authenticated():
        raise PermissionDenied
    enable_notifications(user)
    return HttpResponse(status=204)
@require_POST
def ajax_disable(request):
    """Disable notifications for the authenticated user.

    Invoked via AJAX POST; responds 204 (no content) on success.
    """
    user = request.user
    if not user.is_authenticated():
        raise PermissionDenied
    delete_user_preference(user, NOTIFICATION_PREF_KEY)
    return HttpResponse(status=204)
@require_GET
def ajax_status(request):
    """
    A view that retrieves notifications status for the authenticated user.

    This view should be invoked by an AJAX GET call. It returns status 200,
    with a JSON-formatted payload {"status": <count>}, or an error.
    """
    if not request.user.is_authenticated():
        raise PermissionDenied

    qs = UserPreference.objects.filter(
        user=request.user,
        key=NOTIFICATION_PREF_KEY
    )

    # count() issues a single COUNT(*) query; len(qs) would fetch and
    # materialize every matching row just to count them.
    return HttpResponse(json.dumps({"status": qs.count()}), content_type="application/json")
@require_GET
def set_subscription(request, token, subscribe):  # pylint: disable=unused-argument
    """Disable or re-enable notifications for a possibly-unauthenticated user.

    This is the target of one-click unsubscribe links: the request must be a
    GET and ``token`` must decrypt to a valid username.  ``subscribe=True``
    re-enables notifications ("undo" for an accidental unsubscribe click).

    Responds 405 for non-GET methods and 404 when the token does not
    decrypt to an existing username; otherwise renders a success page.
    """
    try:
        username = UsernameCipher.decrypt(token.encode())
        user = User.objects.get(username=username)
    except UnicodeDecodeError:
        raise Http404("base64url")
    except UsernameDecryptionException as exn:
        raise Http404(exn.message)
    except User.DoesNotExist:
        raise Http404("username")

    # Calling UserPreference directly because the fact that the user is
    # passed in the token implies that it may not match request.user.
    if not subscribe:
        UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).delete()
        return render_to_response("unsubscribe.html", {'token': token})

    UserPreference.objects.get_or_create(
        user=user,
        key=NOTIFICATION_PREF_KEY,
        defaults={"value": UsernameCipher.encrypt(user.username)},
    )
    return render_to_response("resubscribe.html", {'token': token})
| agpl-3.0 |
Signbank/Auslan-signbank | signbank/dictionary/urls.py | 1 | 2232 | from django.conf.urls import *
from django.contrib.auth.decorators import login_required, permission_required
from signbank.dictionary.models import *
from signbank.dictionary.forms import *
from signbank.dictionary.adminviews import GlossListView, GlossDetailView
# URL routes for the dictionary app.  NB: Django tries patterns in order,
# so more specific routes must stay above broader ones.
urlpatterns = patterns('',
    # index page is just the search page
    url(r'^$', 'signbank.dictionary.views.search'),

    # we use the same view for a definition and for the feedback form on that
    # definition, the first component of the path is word or feedback in each case
    url(r'^words/(?P<keyword>.+)-(?P<n>\d+).html$',
        'signbank.dictionary.views.word', name='word_view'),

    url(r'^tag/(?P<tag>[^/]*)/?$', 'signbank.dictionary.tagviews.taglist'),

    # and and alternate view for direct display of a gloss
    url(r'gloss/(?P<idgloss>.+).html$', 'signbank.dictionary.views.gloss', name='public_gloss'),

    url(r'^search/$', 'signbank.dictionary.views.search', name="search"),

    # mutation endpoints used by the gloss editing UI
    url(r'^update/gloss/(?P<glossid>\d+)$', 'signbank.dictionary.update.update_gloss', name='update_gloss'),
    url(r'^update/tag/(?P<glossid>\d+)$', 'signbank.dictionary.update.add_tag', name='add_tag'),
    url(r'^update/definition/(?P<glossid>\d+)$', 'signbank.dictionary.update.add_definition', name='add_definition'),
    url(r'^update/relation/$', 'signbank.dictionary.update.add_relation', name='add_relation'),
    url(r'^update/region/(?P<glossid>\d+)$', 'signbank.dictionary.update.add_region', name='add_region'),
    url(r'^update/gloss/', 'signbank.dictionary.update.add_gloss', name='add_gloss'),

    # JSON helpers for autocomplete widgets
    url(r'^ajax/keyword/(?P<prefix>.*)$', 'signbank.dictionary.views.keyword_value_list'),
    url(r'^ajax/tags/$', 'signbank.dictionary.tagviews.taglist_json'),
    url(r'^ajax/gloss/(?P<prefix>.*)$', 'signbank.dictionary.adminviews.gloss_ajax_complete', name='gloss_complete'),

    url(r'^missingvideo.html$', 'signbank.dictionary.views.missing_video_view'),

    # Admin views (require the search_gloss permission)
    url(r'^list/$', permission_required('dictionary.search_gloss')(GlossListView.as_view()), name='admin_gloss_list'),
    url(r'^gloss/(?P<pk>\d+)', permission_required('dictionary.search_gloss')(GlossDetailView.as_view()), name='admin_gloss_view'),
    )
| bsd-3-clause |
yaseppochi/mailman | src/mailman/database/types.py | 4 | 2401 | # Copyright (C) 2007-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Database type conversions."""
# Public names exported by ``from mailman.database.types import *``.
__all__ = [
    'Enum',
    'UUID',
    ]
import uuid
from sqlalchemy import Integer
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import TypeDecorator, CHAR
class Enum(TypeDecorator):
    """Column type for Python 3.4 style enums.

    The enum member is persisted as its integer ``.value`` and converted
    back to a member of the bound enum class on the way out.
    """
    impl = Integer

    def __init__(self, enum, *args, **kw):
        self.enum = enum
        super(Enum, self).__init__(*args, **kw)

    def process_bind_param(self, value, dialect):
        # Store the member's integer value; NULL stays NULL.
        return None if value is None else value.value

    def process_result_value(self, value, dialect):
        # Re-wrap the stored integer in the enum class; NULL stays NULL.
        return None if value is None else self.enum(value)
class UUID(TypeDecorator):
    """Platform-independent GUID column type.

    On PostgreSQL the native UUID type is used; everywhere else the value
    is stored as a CHAR(32) of lowercase hex digits.
    """
    impl = CHAR

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.UUID())
        return dialect.type_descriptor(CHAR(32))

    def process_bind_param(self, value, dialect):
        if value is None:
            return value
        if dialect.name == 'postgresql':
            return str(value)
        # Other backends: normalize to a uuid.UUID, then store 32 hex chars.
        if not isinstance(value, uuid.UUID):
            value = uuid.UUID(value)
        return '%.32x' % value.int

    def process_result_value(self, value, dialect):
        return value if value is None else uuid.UUID(value)
| gpl-3.0 |
Gateworks/platform-external-chromium_org | net/tools/testserver/asn1.py | 180 | 3751 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file implements very minimal ASN.1, DER serialization.
import types
def ToDER(obj):
    '''ToDER converts the given object into DER encoding'''
    if obj is None:
        # NULL: tag 5 with an empty body.
        return TagAndLength(5, 0)
    kind = type(obj)
    if kind == types.StringType:
        # Strings are PRINTABLESTRING (tag 19).
        return TagAndLength(19, len(obj)) + obj
    if kind == types.BooleanType:
        # BOOLEAN (tag 1): single byte, 0xff for true and 0x00 for false.
        return TagAndLength(1, 1) + ("\xff" if obj else "\x00")
    if kind == types.IntType or kind == types.LongType:
        # INTEGER (tag 2): minimal-length big-endian encoding.
        octets = []
        remaining = obj
        while remaining != 0:
            octets.append(remaining & 0xff)
            remaining >>= 8
        # Zero encodes as a single 0x00 byte; a leading byte >= 0x80 would
        # read as a negative number, so prepend 0x00 in that case too.
        if not octets or octets[-1] >= 128:
            octets.append(0)
        octets.reverse()
        return TagAndLength(2, len(octets)) + ToBytes(octets)
    # Anything else is expected to know how to serialize itself.
    return obj.ToDER()
def ToBytes(array_of_bytes):
    '''ToBytes converts the array of byte values into a binary string'''
    return ''.join(map(chr, array_of_bytes))
def TagAndLength(tag, length):
    '''Return the DER tag byte followed by the encoded length.

    Lengths below 128 use the short form; longer lengths use the one- or
    two-byte long form (0x81 / 0x82).  Lengths above 65535 are unsupported.
    '''
    der = [tag]
    if length < 128:
        # Short form: the length fits in a single byte with the high bit clear.
        der.append(length)
    elif length < 256:
        der.append(0x81)
        der.append(length)
    elif length < 65536:
        # Two-byte big-endian long form.  (Fixed off-by-one: the previous
        # bound of 65535 wrongly rejected length == 65535, which 0x82
        # encodes as 0xff 0xff.)
        der.append(0x82)
        der.append(length >> 8)
        der.append(length & 0xff)
    else:
        assert False
    return ''.join([chr(x) for x in der])
class Raw(object):
    '''Raw contains raw DER encoded bytes that are used verbatim'''
    def __init__(self, der):
        self.der = der

    def ToDER(self):
        # Already encoded; pass straight through.
        return self.der
class Explicit(object):
    '''Explicit wraps a child value in an explicit, context-specific tag'''
    def __init__(self, tag, child):
        self.tag = tag
        self.child = child

    def ToDER(self):
        payload = ToDER(self.child)
        # context-specific (0x80) | constructed/complex (0x20)
        wrapped_tag = self.tag | 0x80 | 0x20
        return TagAndLength(wrapped_tag, len(payload)) + payload
class ENUMERATED(object):
    '''A single-byte ASN.1 ENUMERATED value (tag 10)'''
    def __init__(self, value):
        self.value = value

    def ToDER(self):
        body = chr(self.value)
        return TagAndLength(10, len(body)) + body
class SEQUENCE(object):
    '''An ASN.1 SEQUENCE (tag 0x30) of child values'''
    def __init__(self, children):
        self.children = children

    def ToDER(self):
        encoded = ''.join(map(ToDER, self.children))
        return TagAndLength(0x30, len(encoded)) + encoded
class SET(object):
    '''An ASN.1 SET (tag 0x31) of child values'''
    def __init__(self, children):
        self.children = children

    def ToDER(self):
        encoded = ''.join(map(ToDER, self.children))
        return TagAndLength(0x31, len(encoded)) + encoded
class OCTETSTRING(object):
    '''An ASN.1 OCTETSTRING (tag 4) of arbitrary bytes'''
    def __init__(self, val):
        self.val = val

    def ToDER(self):
        payload = self.val
        return TagAndLength(4, len(payload)) + payload
class OID(object):
    '''An ASN.1 OBJECT IDENTIFIER (tag 6) built from its numeric parts'''
    def __init__(self, parts):
        self.parts = parts

    def ToDER(self):
        parts = self.parts
        # The first two arcs are constrained by the OID encoding rules.
        if len(parts) < 2 or parts[0] > 6 or parts[1] >= 40:
            assert False
        # First byte packs the first two arcs; the rest are base-128 with a
        # continuation bit on every byte but the last.
        body = [parts[0] * 40 + parts[1]]
        for value in parts[2:]:
            if value == 0:
                body.append(0)
                continue
            group = []
            while value != 0:
                septet = value & 0x7f
                if group:
                    septet |= 0x80
                group.append(septet)
                value >>= 7
            group.reverse()
            body.extend(group)
        return TagAndLength(6, len(body)) + ToBytes(body)
class UTCTime(object):
    '''An ASN.1 UTCTime (tag 23) from a pre-formatted time string'''
    def __init__(self, time_str):
        self.time_str = time_str

    def ToDER(self):
        s = self.time_str
        return TagAndLength(23, len(s)) + s
class GeneralizedTime(object):
    '''An ASN.1 GeneralizedTime (tag 24) from a pre-formatted time string'''
    def __init__(self, time_str):
        self.time_str = time_str

    def ToDER(self):
        s = self.time_str
        return TagAndLength(24, len(s)) + s
class BitString(object):
    '''An ASN.1 BIT STRING (tag 3) with no unused trailing bits'''
    def __init__(self, bits):
        self.bits = bits

    def ToDER(self):
        # Leading 0x00 byte: the count of unused bits in the final octet.
        body = "\x00" + self.bits
        return TagAndLength(3, len(body)) + body
| bsd-3-clause |
fangxingli/hue | desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/commoncrypto/common_cryptor.py | 10 | 2711 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
# C headers the compiled binding must include.
INCLUDES = """
#include <CommonCrypto/CommonCryptor.h>
"""

# Type, enum and constant declarations registered with cffi's cdef() so the
# CommonCrypto symbols are usable from Python.
TYPES = """
enum {
    kCCAlgorithmAES128 = 0,
    kCCAlgorithmDES,
    kCCAlgorithm3DES,
    kCCAlgorithmCAST,
    kCCAlgorithmRC4,
    kCCAlgorithmRC2,
    kCCAlgorithmBlowfish
};
typedef uint32_t CCAlgorithm;
enum {
    kCCSuccess = 0,
    kCCParamError = -4300,
    kCCBufferTooSmall = -4301,
    kCCMemoryFailure = -4302,
    kCCAlignmentError = -4303,
    kCCDecodeError = -4304,
    kCCUnimplemented = -4305
};
typedef int32_t CCCryptorStatus;
typedef uint32_t CCOptions;
enum {
    kCCEncrypt = 0,
    kCCDecrypt,
};
typedef uint32_t CCOperation;
typedef ... *CCCryptorRef;

enum {
    kCCModeOptionCTR_LE = 0x0001,
    kCCModeOptionCTR_BE = 0x0002
};

typedef uint32_t CCModeOptions;

enum {
    kCCModeECB = 1,
    kCCModeCBC = 2,
    kCCModeCFB = 3,
    kCCModeCTR = 4,
    kCCModeF8 = 5,
    kCCModeLRW = 6,
    kCCModeOFB = 7,
    kCCModeXTS = 8,
    kCCModeRC4 = 9,
    kCCModeCFB8 = 10,
    kCCModeGCM = 11
};
typedef uint32_t CCMode;
enum {
    ccNoPadding = 0,
    ccPKCS7Padding = 1,
};
typedef uint32_t CCPadding;
"""

# Function prototypes exposed to Python via cdef().
FUNCTIONS = """
CCCryptorStatus CCCryptorCreateWithMode(CCOperation, CCMode, CCAlgorithm,
                                        CCPadding, const void *, const void *,
                                        size_t, const void *, size_t, int,
                                        CCModeOptions, CCCryptorRef *);
CCCryptorStatus CCCryptorCreate(CCOperation, CCAlgorithm, CCOptions,
                                const void *, size_t, const void *,
                                CCCryptorRef *);
CCCryptorStatus CCCryptorUpdate(CCCryptorRef, const void *, size_t, void *,
                                size_t, size_t *);
CCCryptorStatus CCCryptorFinal(CCCryptorRef, void *, size_t, size_t *);
CCCryptorStatus CCCryptorRelease(CCCryptorRef);

CCCryptorStatus CCCryptorGCMAddIV(CCCryptorRef, const void *, size_t);
CCCryptorStatus CCCryptorGCMAddAAD(CCCryptorRef, const void *, size_t);
CCCryptorStatus CCCryptorGCMEncrypt(CCCryptorRef, const void *, size_t,
                                    void *);
CCCryptorStatus CCCryptorGCMDecrypt(CCCryptorRef, const void *, size_t,
                                    void *);
CCCryptorStatus CCCryptorGCMFinal(CCCryptorRef, const void *, size_t *);
CCCryptorStatus CCCryptorGCMReset(CCCryptorRef);
"""

# Declarations that need macro treatment by cffi (none for this binding).
MACROS = """
"""

# Extra C source compiled into the binding itself.
CUSTOMIZATIONS = """
/* Not defined in the public header */
enum {
    kCCModeGCM = 11
};
"""
serverdensity/sd-agent-core-plugins | riak/test_riak.py | 1 | 14008 | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import socket
# 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
@attr(requires='riak')
class RiakTestCase(AgentCheckTest):
CHECK_NAME = 'riak'
CHECK_GAUGES = [
'riak.node_gets',
'riak.node_gets_total',
'riak.node_puts',
'riak.node_puts_total',
'riak.node_gets_counter',
'riak.node_gets_counter_total',
'riak.node_gets_set',
'riak.node_gets_set_total',
'riak.node_gets_map',
'riak.node_gets_map_total',
'riak.node_puts_counter',
'riak.node_puts_counter_total',
'riak.node_puts_set',
'riak.node_puts_set_total',
'riak.node_puts_map',
'riak.node_puts_map_total',
'riak.object_merge',
'riak.object_merge_total',
'riak.object_counter_merge',
'riak.object_counter_merge_total',
'riak.object_set_merge',
'riak.object_set_merge_total',
'riak.object_map_merge',
'riak.object_map_merge_total',
'riak.pbc_active',
'riak.pbc_connects',
'riak.pbc_connects_total',
'riak.read_repairs',
'riak.read_repairs_total',
'riak.skipped_read_repairs',
'riak.skipped_read_repairs_total',
'riak.read_repairs_counter',
'riak.read_repairs_counter_total',
'riak.read_repairs_set',
'riak.read_repairs_set_total',
'riak.read_repairs_map',
'riak.read_repairs_map_total',
'riak.node_get_fsm_active',
'riak.node_get_fsm_active_60s',
'riak.node_get_fsm_in_rate',
'riak.node_get_fsm_out_rate',
'riak.node_get_fsm_rejected',
'riak.node_get_fsm_rejected_60s',
'riak.node_get_fsm_rejected_total',
'riak.node_get_fsm_errors',
'riak.node_get_fsm_errors_total',
'riak.node_put_fsm_active',
'riak.node_put_fsm_active_60s',
'riak.node_put_fsm_in_rate',
'riak.node_put_fsm_out_rate',
'riak.node_put_fsm_rejected',
'riak.node_put_fsm_rejected_60s',
'riak.node_put_fsm_rejected_total',
'riak.riak_kv_vnodes_running',
'riak.vnode_gets',
'riak.vnode_gets_total',
'riak.vnode_puts',
'riak.vnode_puts_total',
'riak.vnode_counter_update',
'riak.vnode_counter_update_total',
'riak.vnode_set_update',
'riak.vnode_set_update_total',
'riak.vnode_map_update',
'riak.vnode_map_update_total',
'riak.vnode_index_deletes',
'riak.vnode_index_deletes_postings',
'riak.vnode_index_deletes_postings_total',
'riak.vnode_index_deletes_total',
'riak.vnode_index_reads',
'riak.vnode_index_reads_total',
'riak.vnode_index_refreshes',
'riak.vnode_index_refreshes_total',
'riak.vnode_index_writes',
'riak.vnode_index_writes_postings',
'riak.vnode_index_writes_postings_total',
'riak.vnode_index_writes_total',
'riak.dropped_vnode_requests_total',
'riak.list_fsm_active',
'riak.list_fsm_create',
'riak.list_fsm_create_total',
'riak.list_fsm_create_error',
'riak.list_fsm_create_error_total',
'riak.index_fsm_active',
'riak.index_fsm_create',
'riak.index_fsm_create_error',
'riak.riak_pipe_vnodes_running',
'riak.executing_mappers',
'riak.pipeline_active',
'riak.pipeline_create_count',
'riak.pipeline_create_error_count',
'riak.pipeline_create_error_one',
'riak.pipeline_create_one',
'riak.rings_reconciled',
'riak.rings_reconciled_total',
'riak.converge_delay_last',
'riak.converge_delay_max',
'riak.converge_delay_mean',
'riak.converge_delay_min',
'riak.rebalance_delay_last',
'riak.rebalance_delay_max',
'riak.rebalance_delay_mean',
'riak.rebalance_delay_min',
'riak.rejected_handoffs',
'riak.handoff_timeouts',
'riak.coord_redirs_total',
'riak.gossip_received',
'riak.ignored_gossip_total',
'riak.mem_allocated',
'riak.mem_total',
'riak.memory_atom',
'riak.memory_atom_used',
'riak.memory_binary',
'riak.memory_code',
'riak.memory_ets',
'riak.memory_processes',
'riak.memory_processes_used',
'riak.memory_system',
'riak.memory_total',
'riak.sys_monitor_count',
'riak.sys_port_count',
'riak.sys_process_count',
'riak.late_put_fsm_coordinator_ack',
'riak.postcommit_fail',
'riak.precommit_fail',
]
CHECK_GAUGES_STATS = [
'riak.node_get_fsm_counter_time_mean',
'riak.node_get_fsm_counter_time_median',
'riak.node_get_fsm_counter_time_95',
'riak.node_get_fsm_counter_time_99',
'riak.node_get_fsm_counter_time_100',
'riak.node_put_fsm_counter_time_mean',
'riak.node_put_fsm_counter_time_median',
'riak.node_put_fsm_counter_time_95',
'riak.node_put_fsm_counter_time_99',
'riak.node_put_fsm_counter_time_100',
'riak.node_get_fsm_set_time_mean',
'riak.node_get_fsm_set_time_median',
'riak.node_get_fsm_set_time_95',
'riak.node_get_fsm_set_time_99',
'riak.node_get_fsm_set_time_100',
'riak.node_put_fsm_set_time_mean',
'riak.node_put_fsm_set_time_median',
'riak.node_put_fsm_set_time_95',
'riak.node_put_fsm_set_time_99',
'riak.node_put_fsm_set_time_100',
'riak.node_get_fsm_map_time_mean',
'riak.node_get_fsm_map_time_median',
'riak.node_get_fsm_map_time_95',
'riak.node_get_fsm_map_time_99',
'riak.node_get_fsm_map_time_100',
'riak.node_put_fsm_map_time_mean',
'riak.node_put_fsm_map_time_median',
'riak.node_put_fsm_map_time_95',
'riak.node_put_fsm_map_time_99',
'riak.node_put_fsm_map_time_100',
'riak.node_get_fsm_counter_objsize_mean',
'riak.node_get_fsm_counter_objsize_median',
'riak.node_get_fsm_counter_objsize_95',
'riak.node_get_fsm_counter_objsize_99',
'riak.node_get_fsm_counter_objsize_100',
'riak.node_get_fsm_set_objsize_mean',
'riak.node_get_fsm_set_objsize_median',
'riak.node_get_fsm_set_objsize_95',
'riak.node_get_fsm_set_objsize_99',
'riak.node_get_fsm_set_objsize_100',
'riak.node_get_fsm_map_objsize_mean',
'riak.node_get_fsm_map_objsize_median',
'riak.node_get_fsm_map_objsize_95',
'riak.node_get_fsm_map_objsize_99',
'riak.node_get_fsm_map_objsize_100',
'riak.node_get_fsm_counter_siblings_mean',
'riak.node_get_fsm_counter_siblings_median',
'riak.node_get_fsm_counter_siblings_95',
'riak.node_get_fsm_counter_siblings_99',
'riak.node_get_fsm_counter_siblings_100',
'riak.node_get_fsm_set_siblings_mean',
'riak.node_get_fsm_set_siblings_median',
'riak.node_get_fsm_set_siblings_95',
'riak.node_get_fsm_set_siblings_99',
'riak.node_get_fsm_set_siblings_100',
'riak.node_get_fsm_map_siblings_mean',
'riak.node_get_fsm_map_siblings_median',
'riak.node_get_fsm_map_siblings_95',
'riak.node_get_fsm_map_siblings_99',
'riak.node_get_fsm_map_siblings_100',
'riak.object_merge_time_mean',
'riak.object_merge_time_median',
'riak.object_merge_time_95',
'riak.object_merge_time_99',
'riak.object_merge_time_100',
'riak.object_counter_merge_time_mean',
'riak.object_counter_merge_time_median',
'riak.object_counter_merge_time_95',
'riak.object_counter_merge_time_99',
'riak.object_counter_merge_time_100',
'riak.object_set_merge_time_mean',
'riak.object_set_merge_time_median',
'riak.object_set_merge_time_95',
'riak.object_set_merge_time_99',
'riak.object_set_merge_time_100',
'riak.object_map_merge_time_mean',
'riak.object_map_merge_time_median',
'riak.object_map_merge_time_95',
'riak.object_map_merge_time_99',
'riak.object_map_merge_time_100',
'riak.counter_actor_counts_mean',
'riak.counter_actor_counts_median',
'riak.counter_actor_counts_95',
'riak.counter_actor_counts_99',
'riak.counter_actor_counts_100',
'riak.set_actor_counts_mean',
'riak.set_actor_counts_median',
'riak.set_actor_counts_95',
'riak.set_actor_counts_99',
'riak.set_actor_counts_100',
'riak.map_actor_counts_mean',
'riak.map_actor_counts_median',
'riak.map_actor_counts_95',
'riak.map_actor_counts_99',
'riak.map_actor_counts_100',
'riak.vnode_get_fsm_time_mean',
'riak.vnode_get_fsm_time_median',
'riak.vnode_get_fsm_time_95',
'riak.vnode_get_fsm_time_99',
'riak.vnode_get_fsm_time_100',
'riak.vnode_put_fsm_time_mean',
'riak.vnode_put_fsm_time_median',
'riak.vnode_put_fsm_time_95',
'riak.vnode_put_fsm_time_99',
'riak.vnode_put_fsm_time_100',
'riak.vnode_counter_update_time_mean',
'riak.vnode_counter_update_time_median',
'riak.vnode_counter_update_time_95',
'riak.vnode_counter_update_time_99',
'riak.vnode_counter_update_time_100',
'riak.vnode_set_update_time_mean',
'riak.vnode_set_update_time_median',
'riak.vnode_set_update_time_95',
'riak.vnode_set_update_time_99',
'riak.vnode_set_update_time_100',
'riak.vnode_map_update_time_mean',
'riak.vnode_map_update_time_median',
'riak.vnode_map_update_time_95',
'riak.vnode_map_update_time_99',
'riak.vnode_map_update_time_100',
'riak.node_get_fsm_time_95',
'riak.node_get_fsm_time_99',
'riak.node_get_fsm_time_100',
'riak.node_get_fsm_time_mean',
'riak.node_get_fsm_time_median',
'riak.node_get_fsm_siblings_mean',
'riak.node_get_fsm_siblings_median',
'riak.node_get_fsm_siblings_95',
'riak.node_get_fsm_siblings_99',
'riak.node_get_fsm_siblings_100',
'riak.node_get_fsm_objsize_95',
'riak.node_get_fsm_objsize_99',
'riak.node_get_fsm_objsize_100',
'riak.node_get_fsm_objsize_mean',
'riak.node_get_fsm_objsize_median',
'riak.node_put_fsm_time_95',
'riak.node_put_fsm_time_median',
'riak.node_put_fsm_time_100',
'riak.node_put_fsm_time_mean',
'riak.node_put_fsm_time_99',
'riak.riak_kv_vnodeq_mean',
'riak.riak_kv_vnodeq_min',
'riak.riak_kv_vnodeq_max',
'riak.riak_kv_vnodeq_median',
'riak.riak_kv_vnodeq_total',
'riak.riak_pipe_vnodeq_mean',
'riak.riak_pipe_vnodeq_min',
'riak.riak_pipe_vnodeq_max',
'riak.riak_pipe_vnodeq_median',
'riak.riak_pipe_vnodeq_total',
]
# The below metrics for leveldb and read repair
# appear when they have no values, however they
# are displayed as "undefined". The search metrics
# do not appear if search is off.
CHECK_NOT_TESTED = [
'riak.coord_redirs',
'riak.leveldb_read_block_error',
'riak.read_repairs_primary_notfound_one',
'riak.read_repairs_primary_notfound_count',
'riak.read_repairs_primary_outofdate_one',
'riak.read_repairs_primary_outofdate_count',
'riak.read_repairs_fallback_notfound_one',
'riak.read_repairs_fallback_notfound_count',
'riak.read_repairs_fallback_outofdate_one',
'riak.read_repairs_fallback_outofdate_count',
'riak.search_query_latency_mean',
'riak.search_query_latency_min',
'riak.search_query_latency_median',
'riak.search_query_latency_95',
'riak.search_query_latency_99',
'riak.search_query_latency_999',
'riak.search_query_latency_max',
'riak.search_index_latency_mean',
'riak.search_index_latency_min',
'riak.search_index_latency_median',
'riak.search_index_latency_95',
'riak.search_index_latency_99',
'riak.search_index_latency_999',
'riak.search_index_latency_max',
'riak.search_index_fail_one',
'riak.search_index_fail_count',
'riak.search_index_throughput_one',
'riak.search_index_throughput_count',
'riak.search_query_fail_one',
'riak.search_query_fail_count',
'riak.search_query_throughput_one',
'riak.search_query_throughput_count',
]
SERVICE_CHECK_NAME = 'riak.can_connect'
    def test_riak(self):
        """Run the check twice against a local dev node and verify that
        every expected gauge and the connectivity service check are
        reported with the configured tags."""
        config_dev1 = {
            "instances": [{
                "url": "http://localhost:18098/stats",
                "tags": ["my_tag"]
            }]
        }
        # Two runs are needed so that rate-derived metrics have two samples.
        self.run_check_twice(config_dev1)
        tags = ['my_tag']
        # Service checks additionally carry the endpoint URL as a tag.
        sc_tags = tags + ['url:' + config_dev1['instances'][0]['url']]
        for gauge in self.CHECK_GAUGES + self.CHECK_GAUGES_STATS:
            self.assertMetric(gauge, count=1, tags=tags)
        self.assertServiceCheckOK(self.SERVICE_CHECK_NAME,
                                  tags=sc_tags,
                                  count=1)
        # FIXME: disabling coverage for now.
        # self.coverage_report()
    def test_bad_config(self):
        """Point the check at a port where Riak is not listening and verify
        that it raises a socket error and emits a CRITICAL service check."""
        self.assertRaises(
            socket.error,
            lambda: self.run_check({"instances": [{"url": "http://localhost:5985"}]})
        )
        sc_tags = ['url:http://localhost:5985']
        self.assertServiceCheckCritical(self.SERVICE_CHECK_NAME,
                                        tags=sc_tags,
                                        count=1)
        self.coverage_report()
| bsd-3-clause |
cloudRoutine/curveship | input_model.py | 3 | 4432 | 'Represent different user inputs (commands, directives, unrecognized).'
__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
class RichInput(object):
    'Encapsulates a user input string and information derived from it.'

    def __init__(self, input_string, tokens):
        # New inputs start out unrecognized until classified by the parser.
        self.unrecognized = True
        self.command = False
        self.directive = False
        self._category = 'unrecognized'
        self.string = input_string
        self.tokens = tokens
        self.normal = []
        # "tokens" will be reduced to [] in building the normal form, "normal"
        self.possible = []
        self.caused = None

    def __str__(self):
        return self.string

    def get_category(self):
        'Getter for the input category (e.g., "command").'
        return self._category

    def set_category(self, value):
        'Setter for the input category (e.g., "command").'
        if value not in ['unrecognized', 'command', 'directive']:
            # ValueError is more precise than the Python-2-only StandardError
            # base class (and is a StandardError subclass on Python 2, so any
            # existing handlers keep working); it also avoids a NameError on
            # Python 3, where StandardError no longer exists.
            raise ValueError('"' + value + '" was given as an input ' +
                             'category but is not a valid category.')
        self._category = value
        # Keep the three convenience booleans in sync with the category.
        self.unrecognized = (value == 'unrecognized')
        self.command = (value == 'command')
        self.directive = (value == 'directive')

    category = property(get_category, set_category)
class InputList(object):
    """Ordered history of everything the user has typed.

    Distinguishes the session (all inputs since the program started) from
    the current traversal (inputs since the current game began, which may
    restart when the player types 'restart')."""

    def __init__(self):
        self._all = []
        self._traversal_start = 0

    def _count(self, category):
        """Return (session, traversal) counts of inputs in *category*.

        The first number covers the whole list; the second only the slice
        belonging to the current traversal."""
        whole = sum(1 for entry in self._all if getattr(entry, category))
        recent = sum(1 for entry in self._all[self._traversal_start:]
                     if getattr(entry, category))
        return (whole, recent)

    def latest_command(self):
        'Return the most recently entered command, or None if there is none.'
        for entry in reversed(self._all):
            if entry.command:
                return entry
        return None

    def update(self, user_input):
        'Append a new input to the history.'
        self._all.append(user_input)

    def reset(self):
        'Mark the next input as the beginning of a new traversal.'
        self._traversal_start = len(self._all)

    def total(self):
        'Return (session, traversal) counts of all inputs.'
        session = len(self._all)
        return (session, session - self._traversal_start)

    def show(self, number):
        'Return a formatted listing of up to *number* recent inputs.'
        begin = max(len(self._all) - number, 0)
        pieces = []
        position = begin
        for entry in self._all[begin:]:
            position += 1
            line = str(position) + '. "' + str(entry) + '" => ' + entry.category
            if not entry.unrecognized:
                line += ': ' + ' '.join(entry.normal)
            pieces.append(line + '\n')
            if position == self._traversal_start:
                pieces.append('\n---- Start of Current Traversal ----\n')
        return ''.join(pieces)[:-1]

    def undo(self):
        """Reclassify the most recent command as a directive.

        Used when a command is undone: the input no longer maps to an
        Action in this World, so it is marked as hypothetical."""
        for idx in range(len(self._all) - 1, -1, -1):
            entry = self._all[idx]
            if entry.command:
                entry.category = 'directive'
                entry.normal = ['(HYPOTHETICALLY)'] + entry.normal
                break

    def count_commands(self):
        'Count commands in the session and current traversal.'
        return self._count('command')

    def count_directives(self):
        'Count directives in the session and current traversal.'
        return self._count('directive')

    def count_unrecognized(self):
        'Count unrecognized inputs in the session and current traversal.'
        return self._count('unrecognized')
| isc |
DenL/pogom-webhook | pogom/pgoapi/protos/POGOProtos/Networking/Requests/Messages/SetContactSettingsMessage_pb2.py | 16 | 2901 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/SetContactSettingsMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data.Player import ContactSettings_pb2 as POGOProtos_dot_Data_dot_Player_dot_ContactSettings__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/SetContactSettingsMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nGPOGOProtos/Networking/Requests/Messages/SetContactSettingsMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\x1a,POGOProtos/Data/Player/ContactSettings.proto\"^\n\x19SetContactSettingsMessage\x12\x41\n\x10\x63ontact_settings\x18\x01 \x01(\x0b\x32\'.POGOProtos.Data.Player.ContactSettingsb\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_Player_dot_ContactSettings__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETCONTACTSETTINGSMESSAGE = _descriptor.Descriptor(
name='SetContactSettingsMessage',
full_name='POGOProtos.Networking.Requests.Messages.SetContactSettingsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='contact_settings', full_name='POGOProtos.Networking.Requests.Messages.SetContactSettingsMessage.contact_settings', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=256,
)
_SETCONTACTSETTINGSMESSAGE.fields_by_name['contact_settings'].message_type = POGOProtos_dot_Data_dot_Player_dot_ContactSettings__pb2._CONTACTSETTINGS
DESCRIPTOR.message_types_by_name['SetContactSettingsMessage'] = _SETCONTACTSETTINGSMESSAGE
SetContactSettingsMessage = _reflection.GeneratedProtocolMessageType('SetContactSettingsMessage', (_message.Message,), dict(
DESCRIPTOR = _SETCONTACTSETTINGSMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.SetContactSettingsMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.SetContactSettingsMessage)
))
_sym_db.RegisterMessage(SetContactSettingsMessage)
# @@protoc_insertion_point(module_scope)
| mit |
averagesecurityguy/AWSlab | aws/securitygroup.py | 1 | 4560 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013, LCI Technology Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of LCI Technology Group nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import logging
import boto.ec2
class SecurityGroup():
    '''Manage a single AWS EC2 security group through a boto connection.

    The group is looked up (and created if missing) when the object is
    constructed, and an SSH ingress rule (tcp/22 from anywhere) is
    always added so instances in the group stay reachable.
    '''
    def __init__(self, name, conn):
        '''
        Create a connection to an AWS region and add the security group if it
        doesn't exist.
        '''
        self.__log = logging.getLogger('SecurityGroup')
        self.name = name
        self.desc = 'Auto-created security group for {0}.'.format(self.name)
        self.__conn = conn
        self.__sg = self.__get_security_group()
        self.add_rule('tcp', 22, 22, '0.0.0.0/0')

    def __get_aws_groups(self, name=None):
        '''Return a list of AWS security groups, optionally filtered by name.'''
        # The former mutable default argument ([]) was replaced with None
        # to avoid the shared-default pitfall; semantics are unchanged.
        groupnames = [] if name is None else name
        try:
            groups = self.__conn.get_all_security_groups(groupnames=groupnames)
        except boto.exception.EC2ResponseError as e:
            self.__log.critical(e.message)
            # The original raised a bare string, which itself raises
            # TypeError at runtime; raise a real exception instead.
            raise RuntimeError('Unable to get AWS security groups.')
        return groups

    def __create_aws_group(self):
        '''Create a new AWS security group.'''
        try:
            self.__conn.create_security_group(self.name, self.desc)
        except boto.exception.EC2ResponseError as e:
            self.__log.critical(e.message)
            raise RuntimeError('Could not create AWS security group.')

    def __get_security_group(self):
        '''
        Return a boto.ec2.securitygroup.SecurityGroup object. Create the
        security group first, if necessary.
        '''
        groups = self.__get_aws_groups()
        if self.name not in [g.name for g in groups]:
            self.__create_aws_group()
        return self.__get_aws_groups(self.name)[0]

    def add_rule(self, protocol, start, end, source):
        '''Add a new rule to the security group, if it doesn't exist.'''
        try:
            self.__sg.authorize(ip_protocol=protocol,
                                from_port=start,
                                to_port=end,
                                cidr_ip=source)
        except boto.exception.EC2ResponseError as e:
            # AWS reports duplicate rules as an error; log and carry on.
            self.__log.warning(e.message)

    def remove_rule(self, protocol, start, end, source):
        '''Remove a rule from the security group.'''
        try:
            self.__sg.revoke(ip_protocol=protocol,
                             from_port=start,
                             to_port=end,
                             cidr_ip=source)
        except boto.exception.EC2ResponseError as e:
            self.__log.warning(e.message)

    def add_rules(self, rules):
        '''Add each (protocol, start, end, source) rule in the list.'''
        for rule in rules:
            self.add_rule(rule[0], rule[1], rule[2], rule[3])

    def remove_rules(self, rules):
        '''Remove each (protocol, start, end, source) rule in the list.'''
        for rule in rules:
            self.remove_rule(rule[0], rule[1], rule[2], rule[3])

    def remove(self):
        '''Remove the security group from the region.'''
        try:
            self.__sg.delete()
        except boto.exception.EC2ResponseError as e:
            self.__log.error(e.message)
| bsd-3-clause |
rgeleta/odoo | addons/crm_profiling/__init__.py | 438 | 1089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_profiling
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
campbe13/openhatch | vendor/packages/twisted/doc/core/benchmarks/tpclient.py | 22 | 1273 | """Throughput test."""
import time, sys
from twisted.internet import reactor, protocol
from twisted.python import log
TIMES = 10000
S = "0123456789" * 1240
toReceive = len(S) * TIMES
class Sender(protocol.Protocol):
    """Writes S to the transport TIMES times, paced by the transport.

    Registered with streaming=0, i.e. as a pull producer: the transport
    calls resumeProducing each time it wants another chunk."""
    def connectionMade(self):
        start()
        self.numSent = 0
        self.received = 0
        self.transport.registerProducer(self, 0)
    def stopProducing(self):
        pass
    def pauseProducing(self):
        pass
    def resumeProducing(self):
        # One chunk per call; stop producing and close once TIMES chunks
        # have been handed to the transport.
        self.numSent += 1
        self.transport.write(S)
        if self.numSent == TIMES:
            self.transport.unregisterProducer()
            self.transport.loseConnection()
    def connectionLost(self, reason):
        # Success only if every chunk was queued before the connection closed.
        shutdown(self.numSent == TIMES)
started = None  # wall-clock time at which the transfer began (set by start())
def start():
    '''Record the transfer start time; called from connectionMade.'''
    global started
    started = time.time()
def shutdown(success):
    '''Print the measured throughput and stop the reactor.'''
    if not success:
        raise SystemExit, "failure or something"
    passed = time.time() - started
    print "Throughput (send): %s kbytes/sec" % ((toReceive / passed) / 1024)
    reactor.stop()
def main():
    '''Connect a Sender to the host/port given on the command line.'''
    f = protocol.ClientFactory()
    f.protocol = Sender
    reactor.connectTCP(sys.argv[1], int(sys.argv[2]), f)
    reactor.run()
if __name__ == '__main__':
    #log.startLogging(sys.stdout)
    main()
| agpl-3.0 |
halostatue/ansible | lib/ansible/plugins/filter/mathstuff.py | 81 | 4025 | # (c) 2014, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import math
import collections
from ansible import errors
def unique(a):
    """Return the distinct elements of *a*.

    If *a* is itself hashable (a tuple, string, frozenset, ...) the
    result is a set; otherwise it is a list of first occurrences, in
    their original order.
    """
    try:
        hashable = collections.Hashable
    except AttributeError:  # the alias was removed in Python 3.10
        from collections import abc as _abc
        hashable = _abc.Hashable
    if isinstance(a, hashable):
        return set(a)
    seen = []
    for x in a:
        if x not in seen:
            seen.append(x)
    return seen
def intersect(a, b):
    """Return the elements common to *a* and *b*.

    If both containers are themselves hashable the result is a set;
    otherwise it is an order-preserving list.
    """
    try:
        hashable = collections.Hashable
    except AttributeError:  # the alias was removed in Python 3.10
        from collections import abc as _abc
        hashable = _abc.Hashable
    if isinstance(a, hashable) and isinstance(b, hashable):
        return set(a) & set(b)
    # A list comprehension (not filter()) keeps the Python 2 behavior of
    # handing unique() a real list on Python 3 as well.
    return unique([x for x in a if x in b])
def difference(a, b):
    """Return the elements of *a* that do not appear in *b*.

    If both containers are themselves hashable the result is a set;
    otherwise it is an order-preserving list.
    """
    try:
        hashable = collections.Hashable
    except AttributeError:  # the alias was removed in Python 3.10
        from collections import abc as _abc
        hashable = _abc.Hashable
    if isinstance(a, hashable) and isinstance(b, hashable):
        return set(a) - set(b)
    # A list comprehension (not filter()) keeps the Python 2 behavior of
    # handing unique() a real list on Python 3 as well.
    return unique([x for x in a if x not in b])
def symmetric_difference(a, b):
    """Return the elements that appear in exactly one of *a* and *b*.

    If both containers are themselves hashable the result is a set;
    otherwise it is an order-preserving list.
    """
    try:
        hashable = collections.Hashable
    except AttributeError:  # the alias was removed in Python 3.10
        from collections import abc as _abc
        hashable = _abc.Hashable
    if isinstance(a, hashable) and isinstance(b, hashable):
        return set(a) ^ set(b)
    # Hoisted: intersect(a, b) used to be recomputed for every element.
    common = intersect(a, b)
    return unique([x for x in union(a, b) if x not in common])
def union(a, b):
    """Return all elements that appear in *a* or *b*.

    If both containers are themselves hashable the result is a set;
    otherwise it is an order-preserving list.
    """
    try:
        hashable = collections.Hashable
    except AttributeError:  # the alias was removed in Python 3.10
        from collections import abc as _abc
        hashable = _abc.Hashable
    if isinstance(a, hashable) and isinstance(b, hashable):
        return set(a) | set(b)
    return unique(a + b)
def min(a):
    '''Return the smallest element of *a*, delegating to the shadowed builtin.'''
    # __builtins__ is the builtins module in __main__ but its __dict__ in
    # imported modules; the old __builtins__.get('min') crashed in the
    # former case. Handle both.
    if isinstance(__builtins__, dict):
        _min = __builtins__['min']
    else:
        _min = __builtins__.min
    return _min(a)
def max(a):
    '''Return the largest element of *a*, delegating to the shadowed builtin.'''
    # __builtins__ is the builtins module in __main__ but its __dict__ in
    # imported modules; the old __builtins__.get('max') crashed in the
    # former case. Handle both.
    if isinstance(__builtins__, dict):
        _max = __builtins__['max']
    else:
        _max = __builtins__.max
    return _max(a)
def isnotanumber(x):
    '''Report whether *x* is the IEEE-754 NaN value.

    Values that math.isnan cannot interpret simply yield False rather
    than raising.
    '''
    try:
        result = math.isnan(x)
    except TypeError:
        result = False
    return result
def logarithm(x, base=math.e):
    '''Return the logarithm of *x* in *base* (natural log by default).

    Base 10 is routed through math.log10 for better accuracy.
    Non-numeric input raises AnsibleFilterError.
    '''
    try:
        return math.log10(x) if base == 10 else math.log(x, base)
    except TypeError as e:
        raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
def power(x, y):
    '''Return x raised to the power y, as a float (math.pow semantics).

    Non-numeric input raises AnsibleFilterError.
    '''
    try:
        result = math.pow(x, y)
    except TypeError as e:
        raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
    return result
def inversepower(x, base=2):
    '''Return the *base*-th root of *x* (square root by default).

    Base 2 is routed through math.sqrt for better accuracy.
    Non-numeric input raises AnsibleFilterError.
    '''
    try:
        if base == 2:
            root = math.sqrt(x)
        else:
            root = math.pow(x, 1.0 / float(base))
    except TypeError as e:
        raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
    return root
def human_readable(size, isbits=False, unit=None):
    '''Format *size* with a binary prefix, e.g. "1.00 MB" or "2.50 Kb".

    With isbits=True the unit word is "bits" instead of "Bytes". When
    *unit* names a prefix letter (case-insensitive), that scale is used
    instead of picking the largest one that fits.
    '''
    base = 'bits' if isbits else 'Bytes'
    scale_table = (
        (1 << 70, 'Z'),
        (1 << 60, 'E'),
        (1 << 50, 'P'),
        (1 << 40, 'T'),
        (1 << 30, 'G'),
        (1 << 20, 'M'),
        (1 << 10, 'K'),
        (1, base),
    )
    # Fall through to the smallest scale if nothing matches.
    threshold, prefix = scale_table[-1]
    for threshold, prefix in scale_table:
        if unit is None and size >= threshold:
            break
        if unit is not None and unit.upper() == prefix:
            break
    if threshold != 1:
        prefix += base[0]
    return '%.2f %s' % (float(size) / threshold, prefix)
class FilterModule(object):
    ''' Ansible math jinja2 filters '''
    def filters(self):
        '''Map jinja2 filter names to the callables defined in this module.'''
        return {
            # general math
            'isnan': isnotanumber,
            'min' : min,
            'max' : max,
            # exponents and logarithms
            'log': logarithm,
            'pow': power,
            'root': inversepower,
            # set theory
            'unique' : unique,
            'intersect': intersect,
            'difference': difference,
            'symmetric_difference': symmetric_difference,
            'union': union,
            # computer theory
            'human_readable' : human_readable,
        }
| gpl-3.0 |
ahmadiga/min_edx | common/lib/xmodule/setup.py | 72 | 3491 | from setuptools import setup, find_packages
XMODULES = [
"abtest = xmodule.abtest_module:ABTestDescriptor",
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SequenceDescriptor",
"combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
"conditional = xmodule.conditional_module:ConditionalDescriptor",
"course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor",
"discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"html = xmodule.html_module:HtmlDescriptor",
"image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"library_content = xmodule.library_content_module:LibraryContentDescriptor",
"error = xmodule.error_module:ErrorDescriptor",
"peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
"poll_question = xmodule.poll_module:PollDescriptor",
"problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor",
"randomize = xmodule.randomize_module:RandomizeDescriptor",
"split_test = xmodule.split_test_module:SplitTestDescriptor",
"section = xmodule.backcompat_module:SemanticSectionDescriptor",
"sequential = xmodule.seq_module:SequenceDescriptor",
"slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"video = xmodule.video_module:VideoDescriptor",
"videoalpha = xmodule.video_module:VideoDescriptor",
"videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"videosequence = xmodule.seq_module:SequenceDescriptor",
"discussion = xmodule.discussion_module:DiscussionDescriptor",
"course_info = xmodule.html_module:CourseInfoDescriptor",
"static_tab = xmodule.html_module:StaticTabDescriptor",
"custom_tag_template = xmodule.raw_module:RawDescriptor",
"about = xmodule.html_module:AboutDescriptor",
"graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor",
"annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
"textannotation = xmodule.textannotation_module:TextAnnotationDescriptor",
"videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor",
"imageannotation = xmodule.imageannotation_module:ImageAnnotationDescriptor",
"foldit = xmodule.foldit_module:FolditDescriptor",
"word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
"hidden = xmodule.hidden_module:HiddenDescriptor",
"raw = xmodule.raw_module:RawDescriptor",
"crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor",
"lti = xmodule.lti_module:LTIDescriptor",
]
XBLOCKS = [
"library = xmodule.library_root_xblock:LibraryRoot",
"vertical = xmodule.vertical_block:VerticalBlock",
"wrapper = xmodule.wrapper_module:WrapperBlock",
]
setup(
name="XModule",
version="0.1",
packages=find_packages(exclude=["tests"]),
install_requires=[
'setuptools',
'docopt',
'capa',
'path.py',
'webob',
'opaque-keys',
],
package_data={
'xmodule': ['js/module/*'],
},
# See http://guide.python-distribute.org/creation.html#entry-points
# for a description of entry_points
entry_points={
'xblock.v1': XMODULES + XBLOCKS,
'xmodule.v1': XMODULES,
'console_scripts': [
'xmodule_assets = xmodule.static_content:main',
],
},
)
| agpl-3.0 |
ackalker/ocrfeeder | src/ocrfeeder/studio/dataHolder.py | 1 | 8231 | # -*- coding: utf-8 -*-
###########################################################################
# OCRFeeder - The complete OCR suite
# Copyright (C) 2009 Joaquim Rocha
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import os.path
from ocrfeeder.util import graphics
import Image
import gobject
from ocrfeeder.util.graphics import getImagePrintSize, getImageResolution
from ocrfeeder.util import TEXT_TYPE, IMAGE_TYPE, ALIGN_LEFT, lib
from pango import WEIGHT_NORMAL, STYLE_NORMAL
class TextData:
    '''Font and layout settings applied to a text region of a page.'''
    def __init__(self, face = 'Sans', size = 12, justification = ALIGN_LEFT, line_space = 0, letter_space = 0, style = STYLE_NORMAL, weight = WEIGHT_NORMAL):
        self.face = face
        self.size = size
        self.line_space = line_space
        self.letter_space = letter_space
        self.justification = justification
        self.style = style
        self.weight = weight
        self.angle = 0
    def convertToDict(self):
        '''Serialize these settings into a {'TextData': {...}} dictionary.'''
        dictionary = lib.getDictFromVariables(['face', 'size', 'line_space',
            'letter_space', 'justification', 'angle'], self)
        # NOTE(review): str.strip('PANGO_') strips any of the characters
        # P, A, N, G, O, _ from both ends (a character set, not a prefix).
        # It happens to turn e.g. 'PANGO_STYLE_NORMAL' into 'STYLE_NORMAL'
        # for the current pango constants, but is fragile -- confirm the
        # repr format before changing it.
        dictionary['style'] = repr(self.style).split(' ')[1].strip('PANGO_')
        dictionary['weight'] = repr(self.weight).split(' ')[1].strip('PANGO_')
        return {'TextData': dictionary}
class DataBox(gobject.GObject):
    """One detected content area (bounding box) on a page image.

    Holds geometry in pixels, the cropped source image, the content type
    (TEXT_TYPE or IMAGE_TYPE), the recognized text and its TextData
    formatting.  The setters emit GObject signals so views stay in sync.
    """
    __gtype_name__ = 'DataBox'
    # One signal per mutable property; payload is the new value.
    __gsignals__ = {
        'changed_x' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     (gobject.TYPE_INT,)),
        'changed_y' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     (gobject.TYPE_INT,)),
        'changed_width' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     (gobject.TYPE_INT,)),
        'changed_height' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     (gobject.TYPE_INT,)),
        'changed_image' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     (gobject.TYPE_PYOBJECT,)),
        'changed_type' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     (gobject.TYPE_INT,))
    }
    def __init__(self, x = 0, y = 0, width = 0, height = 0, image = None, type = TEXT_TYPE, text = ''):
        super(DataBox, self).__init__()
        self.x = int(x)
        self.y = int(y)
        self.width = int(width)
        self.height = int(height)
        self.image = image
        self.setType(type)
        self.text_data = TextData()
        self.text = text
    def configTextData(self, face = 'Sans', size = 12, justification = ALIGN_LEFT, line_space = 1, letter_space = 1):
        # Replaces the current TextData wholesale; style/weight revert to defaults.
        self.text_data = TextData(face, size, justification, line_space, letter_space)
    def setX(self, new_x):
        self.x = new_x
        self.emit('changed_x', self.x)
    def setY(self, new_y):
        self.y = new_y
        self.emit('changed_y', self.y)
    def setWidth(self, new_width):
        self.width = new_width
        self.emit('changed_width', self.width)
    def setHeight(self, new_height):
        self.height = new_height
        self.emit('changed_height', self.height)
    def setImage(self, pixbuf):
        # Stores a PIL image internally but emits the original pixbuf.
        self.image = graphics.convertPixbufToImage(pixbuf)
        self.emit('changed_image', pixbuf)
    def setType(self, new_type):
        self.type = new_type
        self.emit('changed_type', self.type)
    def getType(self):
        return self.type
    def toogleType(self):
        # NOTE(review): misspelling of "toggle" kept for API compatibility.
        # Flips the box between TEXT_TYPE and IMAGE_TYPE.
        if self.type == TEXT_TYPE:
            self.setType(IMAGE_TYPE)
        else:
            self.setType(TEXT_TYPE)
    def setFontFace(self, font_face):
        self.text_data.face = font_face
    def setFontSize(self, font_size):
        self.text_data.size = font_size
    def setFontStyle(self, font_style):
        self.text_data.style = font_style
    def setFontWeight(self, font_weight):
        self.text_data.weight = font_weight
    def setText(self, text):
        self.text = text
    def getText(self):
        return self.text
    def setAngle(self, angle):
        self.text_data.angle = angle
    def getAngle(self):
        return self.text_data.angle
    def setTextAlign(self, align_option):
        self.text_data.justification = align_option
    def setLetterSpacing(self, spacing):
        self.text_data.letter_space = spacing
    def setLineSpacing(self, spacing):
        self.text_data.line_space = spacing
    def getLetterSpacing(self):
        return self.text_data.letter_space
    def getLineSpacing(self):
        return self.text_data.line_space
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def getWidth(self):
        return self.width
    def getHeight(self):
        return self.height
    def getBoundsPrintSize(self, resolution):
        # Converts pixel bounds to print units by dividing by the page's
        # (x, y) resolution (pixels per print unit).
        x_resolution, y_resolution = float(resolution[0]), float(resolution[1])
        x, y, width, height = self.getX(), self.getY(), \
                              self.getWidth(), self.getHeight()
        return x / x_resolution, y / y_resolution, width / x_resolution, height / y_resolution
    def convertToDict(self):
        """Serialize the box (including its TextData) to a plain dict."""
        dictionary = lib.getDictFromVariables(['x', 'y', 'width',
                                               'height', 'type', 'text'], self)
        dictionary['text_data'] = self.text_data.convertToDict()
        return {'DataBox': dictionary}
    def updateBoundsFromBox(self, box):
        # Copies geometry from a canvas box object (uses its GObject props);
        # each setter emits its change signal.
        x, y, width, height = int(box.props.x), int(box.props.y), \
                              int(box.props.width), int(box.props.height)
        self.setX(x)
        self.setY(y)
        self.setWidth(width)
        self.setHeight(height)
        return (x, y, width, height)
class PageData:
    """Per-page model: the page image, its print size/resolution and its boxes."""
    def __init__(self, image_path, data_boxes = None):
        # Avoid the mutable-default-argument pitfall: the previous
        # "data_boxes = []" default was a single shared list, so every
        # PageData created without an explicit list mutated the same object.
        self.data_boxes = data_boxes if data_boxes is not None else []
        image = Image.open(image_path)
        self.pixel_width, self.pixel_height = image.size
        self.image_path = image_path
        self.setSize(getImagePrintSize(image))
        # Override the resolution derived by setSize() with the one reported
        # by the image itself.
        self.resolution = getImageResolution(image)
    def setSize(self, page_size):
        self.width, self.height = page_size
        # Resolution = pixels per print unit in each axis.
        self.resolution = self.pixel_width / self.width, self.pixel_height / self.height
    def setResolution(self, new_resolution):
        self.resolution = new_resolution
    def convertToDict(self):
        """Serialize the page and all of its DataBoxes to a plain dict."""
        dictionary = lib.getDictFromVariables(['pixel_width', 'pixel_height', 'image_path', 'resolution'], self)
        dictionary['data_boxes'] = [data_box.convertToDict() for data_box in self.data_boxes]
        return {'PageData': dictionary}
    def getTextFromBoxes(self, data_boxes=None):
        """Concatenate the text of the given (default: all) text-type boxes.

        Entries are separated by a blank line.
        """
        if data_boxes is None:
            data_boxes = self.data_boxes
        text = ''
        number_of_boxes = len(data_boxes)
        for i, data_box in enumerate(data_boxes):
            # Skip empty slots and image-type boxes.  Previously a falsy
            # (e.g. None) box slipped past the filter and crashed on getText().
            if not data_box or data_box.getType() != TEXT_TYPE:
                continue
            text += data_box.getText()
            if number_of_boxes > 1 and i < number_of_boxes - 1:
                text += '\n\n'
        return text
def create_images_dict_from_liststore(list_store):
    """Map pixbuf -> image path for every row of a gtk.ListStore.

    Column 0 holds the image path and column 2 the pixbuf preview.
    """
    images_dict = {}
    # 'tree_iter' rather than 'iter': don't shadow the builtin.
    tree_iter = list_store.get_iter_root()
    while tree_iter is not None:
        pixbuf = list_store.get_value(tree_iter, 2)
        image_path = list_store.get_value(tree_iter, 0)
        images_dict[pixbuf] = image_path
        tree_iter = list_store.iter_next(tree_iter)
    return images_dict
| gpl-3.0 |
michael-ball/sublime-text | sublime-text-3/Packages/Python PEP8 Autoformat/libs/py26/lib2to3/refactor.py | 7 | 24179 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import StringIO
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from . import pytree, pygram
def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package."""
    pkg = __import__(fixer_pkg, [], [], ["*"])
    pkg_dir = os.path.dirname(pkg.__file__)
    # A fixer module is any "fix_*.py" file inside the package directory.
    modules = [fn for fn in os.listdir(pkg_dir)
               if fn.startswith("fix_") and fn.endswith(".py")]
    start = 4 if remove_prefix else 0
    return [fn[start:-3] for fn in sorted(modules)]
class _EveryNode(Exception):
    """Internal signal: the inspected pattern can match every node type.

    Raised by _get_head_types() and caught in _get_headnode_dict().
    """
    pass
def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
    of the pattern types which will match first. """
    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        # or a type and content -- so they don't get any farther
        # Always return leafs
        if pat.type is None:
            # No type means this pattern matches any head node.
            raise _EveryNode
        return set([pat.type])
    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return _get_head_types(pat.content)
        raise _EveryNode # Negated Patterns don't have a type
    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content
        r = set()
        for p in pat.content:
            for x in p:
                r.update(_get_head_types(x))
        return r
    raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
    of head node type --> fixer list.  """
    head_nodes = collections.defaultdict(list)
    # Fixers whose pattern can match any node type at its head.
    every = []
    for fixer in fixer_list:
        if fixer.pattern:
            try:
                heads = _get_head_types(fixer.pattern)
            except _EveryNode:
                every.append(fixer)
            else:
                for node_type in heads:
                    head_nodes[node_type].append(fixer)
        else:
            # Pattern-less fixers either declare an accepted type or match all.
            if fixer._accept_type is not None:
                head_nodes[fixer._accept_type].append(fixer)
            else:
                every.append(fixer)
    # "Match anything" fixers must be registered for every possible node type.
    for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),
                           pygram.python_grammar.tokens):
        head_nodes[node_type].extend(every)
    return dict(head_nodes)
def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    names = get_all_fix_names(pkg_name, False)
    return ["%s.%s" % (pkg_name, fix_name) for fix_name in names]
def _identity(obj):
    """Return *obj* unchanged (no-op newline translation on Python 3)."""
    return obj
# Python 2/3 compatibility: select helpers for opening files with an explicit
# encoding and for translating between system newlines and '\n'.
if sys.version_info < (3, 0):
    import codecs
    _open_with_encoding = codecs.open
    # codecs.open doesn't translate newlines sadly.
    def _from_system_newlines(input):
        return input.replace(u"\r\n", u"\n")
    def _to_system_newlines(input):
        if os.linesep != "\n":
            return input.replace(u"\n", os.linesep)
        else:
            return input
else:
    # Python 3's open() handles encoding and newline translation natively.
    _open_with_encoding = open
    _from_system_newlines = _identity
    _to_system_newlines = _identity
def _detect_future_features(source):
    """Return the frozenset of __future__ feature names imported by *source*.

    Tokenizes the source and scans only the statements that may legally
    precede code: at most one docstring, then ``from __future__ import ...``
    lines; scanning stops at the first other statement.
    """
    have_docstring = False
    gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    def advance():
        # Yield (token_type, token_string) pairs one at a time.
        tok = gen.next()
        return tok[0], tok[1]
    ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
    features = set()
    try:
        while True:
            tp, value = advance()
            if tp in ignore:
                continue
            elif tp == token.STRING:
                # Only one docstring may appear before the future imports.
                if have_docstring:
                    break
                have_docstring = True
            elif tp == token.NAME and value == u"from":
                tp, value = advance()
                if tp != token.NAME or value != u"__future__":
                    break
                tp, value = advance()
                if tp != token.NAME or value != u"import":
                    break
                tp, value = advance()
                if tp == token.OP and value == u"(":
                    # Parenthesized import list; skip the open paren.
                    tp, value = advance()
                while tp == token.NAME:
                    # Collect comma-separated feature names.
                    features.add(value)
                    tp, value = advance()
                    if tp != token.OP or value != u",":
                        break
                    tp, value = advance()
            else:
                break
    except StopIteration:
        pass
    return frozenset(features)
class FixerError(Exception):
    """A fixer could not be loaded.

    Raised by RefactoringTool.get_fixers() for a missing fixer class or an
    invalid fixer order.
    """
class RefactoringTool(object):
_default_options = {"print_function" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: an dict with configuration.
explicit: a list of fixers to run even if they are explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.pre_order_heads = _get_headnode_dict(self.pre_order)
self.post_order_heads = _get_headnode_dict(self.post_order)
self.files = [] # List of files that were or should be modified
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if not name.startswith(".") and \
os.path.splitext(name)[1].endswith("py"):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError, err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += u"\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if tree and tree.was_changed:
# The [:-1] is to take off the \n we added earlier
self.processed_file(unicode(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception, err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if tree and tree.was_changed:
self.processed_file(unicode(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
self.traverse_by(self.pre_order_heads, tree.pre_order())
self.traverse_by(self.post_order_heads, tree.post_order())
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored, and there are changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error, err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error, err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + u"\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return u"".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception, err:
if self.log.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip(u"\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = unicode(tree).splitlines(True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == [u"\n"] * (lineno-1), clipped
if not new[-1].endswith(u"\n"):
new[-1] += u"\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + u"\n":
yield u"\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
    """Raised when multiprocess refactoring is requested but the
    multiprocessing module cannot be imported."""
    pass
class MultiprocessRefactoringTool(RefactoringTool):
    """RefactoringTool that can farm refactor_file() calls out to a pool of
    worker processes via a JoinableQueue."""
    def __init__(self, *args, **kwargs):
        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
        # self.queue doubles as the "multiprocess mode active" flag.
        self.queue = None
        self.output_lock = None
    def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            # With self.queue set, refactor_file() below enqueues tasks
            # instead of doing the work itself.
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            # One None sentinel per worker tells each _child loop to exit.
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
    def _child(self):
        # Worker loop: consume (args, kwargs) tasks until the None sentinel.
        task = self.queue.get()
        while task is not None:
            args, kwargs = task
            try:
                super(MultiprocessRefactoringTool, self).refactor_file(
                    *args, **kwargs)
            finally:
                self.queue.task_done()
            task = self.queue.get()
    def refactor_file(self, *args, **kwargs):
        # Master side while a queue exists: enqueue for a worker to process.
        if self.queue is not None:
            self.queue.put((args, kwargs))
        else:
            return super(MultiprocessRefactoringTool, self).refactor_file(
                *args, **kwargs)
| unlicense |
tuhangdi/django | tests/gis_tests/test_geoip.py | 75 | 6731 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import socket
import unittest
import warnings
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip import HAS_GEOIP
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.test import ignore_warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
if HAS_GEOIP:
from django.contrib.gis.geoip import GeoIP, GeoIPException
from django.contrib.gis.geoip.prototypes import GeoIP_lib_version
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_DATA path should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat'.
@skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
    "GeoIP is required along with the GEOIP_PATH setting.")
@ignore_warnings(category=RemovedInDjango20Warning)
class GeoIPTest(unittest.TestCase):
    """Tests for the deprecated django.contrib.gis.geoip GeoIP wrapper.

    Requires the MaxMind country ('GeoIP.dat') and city ('GeoLiteCity.dat')
    datasets under settings.GEOIP_PATH.
    """
    addr = '128.249.1.1'  # IP the fixture databases geolocate to Houston, TX
    fqdn = 'tmc.edu'
    def _is_dns_available(self, domain):
        # Naive check to see if there is DNS available to use.
        # Used to conditionally skip fqdn geoip checks.
        # See #25407 for details.
        ErrClass = socket.error if six.PY2 else OSError
        try:
            socket.gethostbyname(domain)
            return True
        except ErrClass:
            return False
    def test01_init(self):
        "Testing GeoIP initialization."
        g1 = GeoIP()  # Everything inferred from GeoIP path
        path = settings.GEOIP_PATH
        g2 = GeoIP(path, 0)  # Passing in data path explicitly.
        g3 = GeoIP.open(path, 0)  # MaxMind Python API syntax.
        for g in (g1, g2, g3):
            self.assertTrue(g._country)
            self.assertTrue(g._city)
        # Only passing in the location of one database.
        city = os.path.join(path, 'GeoLiteCity.dat')
        cntry = os.path.join(path, 'GeoIP.dat')
        g4 = GeoIP(city, country='')
        self.assertIsNone(g4._country)
        g5 = GeoIP(cntry, city='')
        self.assertIsNone(g5._city)
        # Improper parameters.
        bad_params = (23, 'foo', 15.23)
        for bad in bad_params:
            self.assertRaises(GeoIPException, GeoIP, cache=bad)
            if isinstance(bad, six.string_types):
                e = GeoIPException
            else:
                e = TypeError
            self.assertRaises(e, GeoIP, bad, 0)
    def test02_bad_query(self):
        "Testing GeoIP query parameter checking."
        cntry_g = GeoIP(city='<foo>')
        # No city database available, these calls should fail.
        self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
        self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
        # Non-string query should raise TypeError
        self.assertRaises(TypeError, cntry_g.country_code, 17)
        self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
    def test03_country(self):
        "Testing GeoIP country querying methods."
        g = GeoIP(city='<foo>')
        queries = [self.addr]
        if self._is_dns_available(self.fqdn):
            queries.append(self.fqdn)
        for query in queries:
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query), 'Failed for func %s and query %s' % (func, query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query), 'Failed for func %s and query %s' % (func, query))
            self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
                             g.country(query))
    @skipUnless(HAS_GEOS, "Geos is required")
    def test04_city(self):
        "Testing GeoIP city querying methods."
        g = GeoIP(country='<foo>')
        queries = [self.addr]
        if self._is_dns_available(self.fqdn):
            queries.append(self.fqdn)
        for query in queries:
            # Country queries should still work.
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
                             g.country(query))
            # City information dictionary.
            d = g.city(query)
            self.assertEqual('USA', d['country_code3'])
            self.assertEqual('Houston', d['city'])
            self.assertEqual('TX', d['region'])
            self.assertEqual(713, d['area_code'])
            geom = g.geos(query)
            self.assertIsInstance(geom, GEOSGeometry)
            lon, lat = (-95.4010, 29.7079)
            lat_lon = g.lat_lon(query)
            lat_lon = (lat_lon[1], lat_lon[0])
            for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
                self.assertAlmostEqual(lon, tup[0], 4)
                self.assertAlmostEqual(lat, tup[1], 4)
    def test05_unicode_response(self):
        "Testing that GeoIP strings are properly encoded, see #16553."
        g = GeoIP()
        fqdn = "duesseldorf.de"
        if self._is_dns_available(fqdn):
            d = g.city(fqdn)
            self.assertEqual('Düsseldorf', d['city'])
        d = g.country('200.26.205.1')
        # Some databases have only unaccented countries
        self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))
    def test_deprecation_warning(self):
        # Instantiating GeoIP() must emit exactly one deprecation warning.
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            GeoIP()
        self.assertEqual(len(warns), 1)
        msg = str(warns[0].message)
        self.assertIn('django.contrib.gis.geoip is deprecated', msg)
    def test_repr(self):
        # repr() includes the C library version only when it is available.
        path = settings.GEOIP_PATH
        g = GeoIP(path=path)
        country_path = g._country_file
        city_path = g._city_file
        if GeoIP_lib_version:
            expected = '<GeoIP [v%(version)s] _country_file="%(country)s", _city_file="%(city)s">' % {
                'version': force_text(GeoIP_lib_version()),
                'country': country_path,
                'city': city_path,
            }
        else:
            expected = '<GeoIP _country_file="%(country)s", _city_file="%(city)s">' % {
                'country': country_path,
                'city': city_path,
            }
        self.assertEqual(repr(g), expected)
| bsd-3-clause |
pra85/calibre | src/calibre/ebooks/docx/toc.py | 6 | 4783 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import namedtuple
from lxml.etree import tostring
from calibre.ebooks.docx.names import XPath, descendants, get, ancestor
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.oeb.polish.toc import elem_to_toc_text
class Count(object):
    """Mutable integer counter (shared by the ensure_id closure in
    from_headings below)."""
    __slots__ = ('val',)
    def __init__(self):
        self.val = 0
def from_headings(body, log):
    ' Create a TOC from headings in the document '
    headings = ('h1', 'h2', 'h3')
    tocroot = TOC()
    xpaths = [XPath('//%s' % x) for x in headings]
    # level_prev[i]: most recently created TOC node at depth i (0 = root).
    level_prev = {i+1:None for i in xrange(len(xpaths))}
    level_prev[0] = tocroot
    # Map each heading element to its 1-based heading level.
    level_item_map = {i+1:frozenset(xp(body)) for i, xp in enumerate(xpaths)}
    item_level_map = {e:i for i, elems in level_item_map.iteritems() for e in elems}
    idcount = Count()
    def ensure_id(elem):
        # Give the heading an id attribute so the TOC entry can link to it.
        ans = elem.get('id', None)
        if not ans:
            idcount.val += 1
            ans = 'toc_id_%d' % idcount.val
            elem.set('id', ans)
        return ans
    for item in descendants(body, *headings):
        lvl = plvl = item_level_map.get(item, None)
        if lvl is None:
            continue
        parent = None
        # Walk up to the nearest shallower level that already has a node.
        while parent is None:
            plvl -= 1
            parent = level_prev[plvl]
        lvl = plvl + 1
        elem_id = ensure_id(item)
        text = elem_to_toc_text(item)
        toc = parent.add_item('index.html', elem_id, text)
        level_prev[lvl] = toc
        # Deeper levels restart under the node we just created.
        for i in xrange(lvl+1, len(xpaths)+1):
            level_prev[i] = None
    if len(tuple(tocroot.flat())) > 1:
        log('Generating Table of Contents from headings')
    return tocroot
def structure_toc(entries):
    """Nest a flat list of TI entries into a TOC, using each entry's
    indent value as its depth level."""
    indent_vals = sorted({x.indent for x in entries})
    last_found = [None for i in indent_vals]
    newtoc = TOC()
    # Too many distinct indents: treat indentation as meaningless, flatten.
    if len(indent_vals) > 6:
        for x in entries:
            newtoc.add_item('index.html', x.anchor, x.text)
        return newtoc
    def find_parent(level):
        # Closest shallower entry already placed; fall back to the root.
        candidates = last_found[:level]
        for x in reversed(candidates):
            if x is not None:
                return x
        return newtoc
    for item in entries:
        level = indent_vals.index(item.indent)
        parent = find_parent(level)
        last_found[level] = parent.add_item('index.html', item.anchor,
                                            item.text)
        # Deeper levels restart under the node we just created.
        for i in xrange(level+1, len(last_found)):
            last_found[i] = None
    return newtoc
def link_to_txt(a, styles, object_map):
    """Return the plain text of hyperlink element *a*, first dropping any
    child runs whose resolved style hides them (display: none)."""
    if len(a) > 1:
        # Iterate over a snapshot of the children: removing from the element
        # while iterating it directly makes lxml skip the sibling following
        # each removed child, so hidden runs could survive into the text.
        for child in tuple(a):
            run = object_map.get(child, None)
            if run is not None:
                rs = styles.resolve(run)
                if rs.css.get('display', None) == 'none':
                    a.remove(child)
    return tostring(a, method='text', with_tail=False, encoding=unicode).strip()
def from_toc(docx, link_map, styles, object_map, log):
    '''
    Extract an explicit Word Table of Contents (a "TOC" field) from the
    document, if present. Returns a structured TOC or None.

    Word wraps a TOC in fldChar begin/end markers with an instrText of
    the form "TOC ..."; the entries themselves are hyperlinks between
    those markers. Field chars can nest, so a counter tracks depth.
    '''
    toc_level = None   # field-nesting depth at which the TOC field began
    level = 0          # current fldChar begin/end nesting depth
    TI = namedtuple('TI', 'text anchor indent')
    toc = []
    for tag in XPath('//*[(@w:fldCharType and name()="w:fldChar") or name()="w:hyperlink" or name()="w:instrText"]')(docx):
        n = tag.tag.rpartition('}')[-1]
        if n == 'fldChar':
            t = get(tag, 'w:fldCharType')
            if t == 'begin':
                level += 1
            elif t == 'end':
                level -= 1
                # Dropping below the TOC field's depth means the TOC
                # field is closed; stop scanning.
                if toc_level is not None and level < toc_level:
                    break
        elif n == 'instrText':
            # "TOC " marks the field we are inside of as a TOC field.
            if level > 0 and tag.text and tag.text.strip().startswith('TOC '):
                toc_level = level
        elif n == 'hyperlink':
            # Hyperlinks inside the TOC field are the actual entries.
            if toc_level is not None and level >= toc_level and tag in link_map:
                a = link_map[tag]
                href = a.get('href', None)
                txt = link_to_txt(a, styles, object_map)
                p = ancestor(tag, 'w:p')
                if txt and href and p is not None:
                    ps = styles.resolve_paragraph(p)
                    # Use the paragraph's left margin (in px, e.g. "12px")
                    # as the nesting hint for structure_toc().
                    try:
                        ml = int(ps.margin_left[:-2])
                    except (TypeError, ValueError, AttributeError):
                        ml = 0
                    # Centered/right-aligned margins do not indicate
                    # nesting depth.
                    if ps.text_align in {'center', 'right'}:
                        ml = 0
                    # href[1:] strips the leading '#' to get the anchor.
                    toc.append(TI(txt, href[1:], ml))
    if toc:
        log('Found Word Table of Contents, using it to generate the Table of Contents')
        return structure_toc(toc)
def create_toc(docx, body, link_map, styles, object_map, log):
    ''' Prefer an explicit Word TOC field; otherwise derive one from headings. '''
    toc = from_toc(docx, link_map, styles, object_map, log)
    if toc:
        return toc
    return from_headings(body, log)
| gpl-3.0 |
mruddy/bitcoin | test/functional/feature_loadblock.py | 11 | 3583 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test loadblock option
Test the option to start a node with the option loadblock which loads
a serialized blockchain from a file (usually called bootstrap.dat).
To generate that file this test uses the helper scripts available
in contrib/linearize.
"""
import os
import subprocess
import sys
import tempfile
import urllib
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class LoadblockTest(BitcoinTestFramework):
    """Exercise -loadblock: linearize node 0's chain into bootstrap.dat
    with the contrib/linearize scripts, then boot node 1 from that file
    and verify both nodes end on the same 100-block chain."""
    def set_test_params(self):
        self.setup_clean_chain = True
        # Node 0 mines and exports; node 1 imports from the bootstrap file.
        self.num_nodes = 2
        self.supports_cli = False
    def run_test(self):
        # Keep node 1 offline so it can only learn blocks via -loadblock.
        self.nodes[1].setnetworkactive(state=False)
        self.nodes[0].generate(COINBASE_MATURITY)
        # Parsing the url of our node to get settings for config file
        data_dir = self.nodes[0].datadir
        node_url = urllib.parse.urlparse(self.nodes[0].url)
        cfg_file = os.path.join(data_dir, "linearize.cfg")
        bootstrap_file = os.path.join(self.options.tmpdir, "bootstrap.dat")
        genesis_block = self.nodes[0].getblockhash(0)
        blocks_dir = os.path.join(data_dir, self.chain, "blocks")
        # delete=False: the file is handed to a subprocess as stdout and
        # read again later; it must outlive this handle.
        hash_list = tempfile.NamedTemporaryFile(dir=data_dir,
                                                mode='w',
                                                delete=False,
                                                encoding="utf-8")
        self.log.info("Create linearization config file")
        with open(cfg_file, "a", encoding="utf-8") as cfg:
            cfg.write("datadir={}\n".format(data_dir))
            cfg.write("rpcuser={}\n".format(node_url.username))
            cfg.write("rpcpassword={}\n".format(node_url.password))
            cfg.write("port={}\n".format(node_url.port))
            cfg.write("host={}\n".format(node_url.hostname))
            cfg.write("output_file={}\n".format(bootstrap_file))
            cfg.write("max_height=100\n")
            # Regtest network magic bytes expected by the linearize tools.
            cfg.write("netmagic=fabfb5da\n")
            cfg.write("input={}\n".format(blocks_dir))
            cfg.write("genesis={}\n".format(genesis_block))
            cfg.write("hashlist={}\n".format(hash_list.name))
        base_dir = self.config["environment"]["SRCDIR"]
        linearize_dir = os.path.join(base_dir, "contrib", "linearize")
        self.log.info("Run linearization of block hashes")
        linearize_hashes_file = os.path.join(linearize_dir, "linearize-hashes.py")
        # First pass writes the ordered block-hash list consumed below.
        subprocess.run([sys.executable, linearize_hashes_file, cfg_file],
                       stdout=hash_list,
                       check=True)
        self.log.info("Run linearization of block data")
        linearize_data_file = os.path.join(linearize_dir, "linearize-data.py")
        # Second pass serializes the raw blocks into bootstrap.dat.
        subprocess.run([sys.executable, linearize_data_file, cfg_file],
                       check=True)
        self.log.info("Restart second, unsynced node with bootstrap file")
        self.restart_node(1, extra_args=["-loadblock=" + bootstrap_file])
        assert_equal(self.nodes[1].getblockcount(), 100)  # start_node is blocking on all block files being imported
        assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100)
        assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
if __name__ == '__main__':
LoadblockTest().main()
| mit |
anryko/ansible | lib/ansible/modules/cloud/packet/packet_sshkey.py | 21 | 8829 | #!/usr/bin/python
# Copyright 2016 Tomas Karasek <tom.to.the.k@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: packet_sshkey
short_description: Create/delete an SSH key in Packet host.
description:
- Create/delete an SSH key in Packet host.
- API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
version_added: "2.3"
author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
auth_token:
description:
- Packet api token. You can also supply it in env var C(PACKET_API_TOKEN).
label:
description:
- Label for the key. If you keep it empty, it will be read from key string.
id:
description:
- UUID of the key which you want to remove.
fingerprint:
description:
- Fingerprint of the key which you want to remove.
key:
description:
- Public Key string ({type} {base64 encoded key} {description}).
key_file:
description:
- File with the public key.
requirements:
- "python >= 2.6"
- packet-python
'''
EXAMPLES = '''
# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
# You can also pass the api token in module param auth_token.
- name: create sshkey from string
hosts: localhost
tasks:
packet_sshkey:
key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
- name: create sshkey from file
hosts: localhost
tasks:
packet_sshkey:
label: key from file
key_file: ~/ff.pub
- name: remove sshkey by id
hosts: localhost
tasks:
packet_sshkey:
state: absent
id: eef49903-7a09-4ca1-af67-4087c29ab5b6
'''
# Ansible return-value documentation; parsed by ansible-doc. Fixed the
# typo "createe" -> "created" in the sshkeys description.
RETURN = '''
changed:
    description: True if a sshkey was created or removed.
    type: bool
    sample: True
    returned: always
sshkeys:
    description: Information about sshkeys that were created/removed.
    type: list
    sample: [
        {
            "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
            "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
            "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
            "label": "mynewkey33"
        }
    ]
    returned: always
'''  # NOQA
import os
import uuid
from ansible.module_utils.basic import AnsibleModule
HAS_PACKET_SDK = True
try:
import packet
except ImportError:
HAS_PACKET_SDK = False
PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
def serialize_sshkey(sshkey):
    """Flatten a packet SSH-key object into a plain dict of its public fields."""
    return {field: getattr(sshkey, field)
            for field in ('id', 'key', 'label', 'fingerprint')}
def is_valid_uuid(myuuid):
    """Return True iff *myuuid* is a canonically-formatted version-4 UUID string."""
    try:
        # round-trip through uuid.UUID: only the canonical lowercase,
        # dash-separated spelling compares equal to its own str()
        return str(uuid.UUID(myuuid, version=4)) == myuuid
    except ValueError:
        return False
def load_key_string(key_str):
    """Parse a public-key string "type base64 [label]" into a dict.

    Always returns the stripped key under 'key'; adds 'label' when a
    third whitespace-separated field is present. Raises Exception for
    any other field count.
    """
    key_str = key_str.strip()
    fields = key_str.split()
    if len(fields) not in (2, 3):
        raise Exception("Public key %s is in wrong format" % key_str)
    parsed = {'key': key_str}
    if len(fields) == 3:
        parsed['label'] = fields[2]
    return parsed
def get_sshkey_selector(module):
    """Build a predicate that decides whether an existing Packet SSH key
    matches the key described by the module parameters.

    If a key string is given (directly or via key_file), matching is by
    key string alone; otherwise every supplied field (label, fingerprint,
    id) must match. Raises Exception for a malformed 'id' parameter.
    """
    key_id = module.params.get('id')
    if key_id:
        if not is_valid_uuid(key_id):
            raise Exception("sshkey ID %s is not valid UUID" % key_id)
    selecting_fields = ['label', 'fingerprint', 'id', 'key']
    select_dict = {}
    # Collect only the selector fields the caller actually supplied.
    for f in selecting_fields:
        if module.params.get(f) is not None:
            select_dict[f] = module.params.get(f)
    if module.params.get('key_file'):
        with open(module.params.get('key_file')) as _file:
            loaded_key = load_key_string(_file.read())
        select_dict['key'] = loaded_key['key']
        # An explicit 'label' parameter wins over the one embedded in
        # the key file's comment field.
        if module.params.get('label') is None:
            if loaded_key.get('label'):
                select_dict['label'] = loaded_key['label']
    def selector(k):
        if 'key' in select_dict:
            # if key string is specified, compare only the key strings
            return k.key == select_dict['key']
        else:
            # if key string not specified, all the fields must match
            return all([select_dict[f] == getattr(k, f) for f in select_dict])
    return selector
def act_on_sshkeys(target_state, module, packet_conn):
    """Ensure the SSH key described by *module* params is present/absent.

    Returns {'changed': bool, 'sshkeys': [serialized keys acted upon]}.
    Raises Exception when required parameters are missing or deletion fails.
    """
    selector = get_sshkey_selector(module)
    existing_sshkeys = packet_conn.list_ssh_keys()
    # BUGFIX: on Python 3, filter() returns a lazy iterator, so the old
    # `matching_sshkeys == []` test was always False and .append() failed;
    # materialize the matches into a real list.
    matching_sshkeys = list(filter(selector, existing_sshkeys))
    changed = False
    if target_state == 'present':
        if not matching_sshkeys:
            # there is no key matching the fields from module call
            # => create the key, label and
            newkey = {}
            if module.params.get('key_file'):
                with open(module.params.get('key_file')) as f:
                    newkey = load_key_string(f.read())
            if module.params.get('key'):
                newkey = load_key_string(module.params.get('key'))
            if module.params.get('label'):
                newkey['label'] = module.params.get('label')
            # Both a label and a key string are mandatory for creation.
            for param in ('label', 'key'):
                if param not in newkey:
                    _msg = ("If you want to ensure a key is present, you must "
                            "supply both a label and a key string, either in "
                            "module params, or in a key file. %s is missing"
                            % param)
                    raise Exception(_msg)
            matching_sshkeys = []
            new_key_response = packet_conn.create_ssh_key(
                newkey['label'], newkey['key'])
            changed = True
            matching_sshkeys.append(new_key_response)
    else:
        # state is 'absent' => delete matching keys
        for k in matching_sshkeys:
            try:
                k.delete()
                changed = True
            except Exception as e:
                _msg = ("while trying to remove sshkey %s, id %s %s, "
                        "got error: %s" %
                        (k.label, k.id, target_state, e))
                raise Exception(_msg)
    return {
        'changed': changed,
        'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
    }
def main():
    """Module entry point: parse parameters, connect to the Packet API
    and apply the requested key state."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['present', 'absent'], default='present'),
            # Falls back to the PACKET_API_TOKEN environment variable.
            auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
                            no_log=True),
            label=dict(type='str', aliases=['name'], default=None),
            id=dict(type='str', default=None),
            fingerprint=dict(type='str', default=None),
            key=dict(type='str', default=None, no_log=True),
            key_file=dict(type='path', default=None),
        ),
        # Only one way of identifying/supplying a key may be used at a time.
        mutually_exclusive=[
            ('label', 'id'),
            ('label', 'fingerprint'),
            ('id', 'fingerprint'),
            ('key', 'fingerprint'),
            ('key', 'id'),
            ('key_file', 'key'),
        ]
    )
    if not HAS_PACKET_SDK:
        module.fail_json(msg='packet required for this module')
    if not module.params.get('auth_token'):
        _fail_msg = ("if Packet API token is not in environment variable %s, "
                     "the auth_token parameter is required" %
                     PACKET_API_TOKEN_ENV_VAR)
        module.fail_json(msg=_fail_msg)
    auth_token = module.params.get('auth_token')
    packet_conn = packet.Manager(auth_token=auth_token)
    state = module.params.get('state')
    if state in ['present', 'absent']:
        try:
            # act_on_sshkeys returns the dict Ansible expects as the result.
            module.exit_json(**act_on_sshkeys(state, module, packet_conn))
        except Exception as e:
            module.fail_json(msg='failed to set sshkey state: %s' % str(e))
    else:
        module.fail_json(msg='%s is not a valid state for this module' % state)
if __name__ == '__main__':
main()
| gpl-3.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/service_now_object_dataset.py | 1 | 1845 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .dataset import Dataset
class ServiceNowObjectDataset(Dataset):
    """ServiceNow server dataset.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param description: Dataset description.
    :type description: str
    :param structure: Columns that define the structure of the dataset. Type:
     array (or Expression with resultType array), itemType: DatasetDataElement.
    :type structure: object
    :param linked_service_name: Linked service reference.
    :type linked_service_name:
     ~azure.mgmt.datafactory.models.LinkedServiceReference
    :param parameters: Parameters for dataset.
    :type parameters: dict[str,
     ~azure.mgmt.datafactory.models.ParameterSpecification]
    :param type: Constant filled by server.
    :type type: str
    """
    # Server-side validation constraints consumed by the msrest serializer.
    _validation = {
        'linked_service_name': {'required': True},
        'type': {'required': True},
    }
    def __init__(self, linked_service_name, additional_properties=None, description=None, structure=None, parameters=None):
        super(ServiceNowObjectDataset, self).__init__(additional_properties=additional_properties, description=description, structure=structure, linked_service_name=linked_service_name, parameters=parameters)
        # Polymorphic discriminator: identifies this Dataset subtype on the wire.
        self.type = 'ServiceNowObject'
| mit |
aferr/LatticeMemCtl | tests/long/se/60.bzip2/test.py | 21 | 1751 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
# gem5 regression config: run the SPEC CPU2000 bzip2 benchmark in
# syscall-emulation mode. `isa`, `opsys` and `root` are injected into
# this script's namespace by the gem5 test harness before execution.
m5.util.addToPath('../configs/common')
from cpu2000 import bzip2_source
# 'lgred' selects the reduced (MinneSPEC) input set for a quick run.
workload = bzip2_source(isa, opsys, 'lgred')
root.system.cpu.workload = workload.makeLiveProcess()
| bsd-3-clause |
tersmitten/ansible | lib/ansible/module_utils/facts/network/fc_wwn.py | 66 | 3450 | # Fibre Channel WWN initiator related facts collection for ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import glob
from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
class FcWwnInitiatorFactCollector(BaseFactCollector):
    # Collects Fibre Channel initiator WWNs on Linux, Solaris and AIX.
    name = 'fibre_channel_wwn'
    _fact_ids = set()
    def collect(self, module=None, collected_facts=None):
        """
        Return {'fibre_channel_wwn': [wwn, ...]} for the local host.

        Example contents /sys/class/fc_host/*/port_name:
        0x21000014ff52a9bb
        """
        fc_facts = {}
        fc_facts['fibre_channel_wwn'] = []
        if sys.platform.startswith('linux'):
            for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
                for line in get_file_lines(fcfile):
                    # Strip the leading '0x' prefix from the sysfs value.
                    fc_facts['fibre_channel_wwn'].append(line.rstrip()[2:])
        elif sys.platform.startswith('sunos'):
            """
            on solaris 10 or solaris 11 should use `fcinfo hba-port`
            TBD (not implemented): on solaris 9 use `prtconf -pv`
            """
            cmd = module.get_bin_path('fcinfo')
            cmd = cmd + " hba-port"
            rc, fcinfo_out, err = module.run_command(cmd)
            """
            # fcinfo hba-port | grep "Port WWN"
            HBA Port WWN: 10000090fa1658de
            """
            if fcinfo_out:
                for line in fcinfo_out.splitlines():
                    if 'Port WWN' in line:
                        # WWN is the last whitespace-separated token.
                        data = line.split(' ')
                        fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
        elif sys.platform.startswith('aix'):
            # get list of available fibre-channel devices (fcs)
            cmd = module.get_bin_path('lsdev')
            cmd = cmd + " -Cc adapter -l fcs*"
            rc, lsdev_out, err = module.run_command(cmd)
            if lsdev_out:
                lscfg_cmd = module.get_bin_path('lscfg')
                for line in lsdev_out.splitlines():
                    # if device is available (not in defined state), get its WWN
                    if 'Available' in line:
                        data = line.split(' ')
                        cmd = lscfg_cmd + " -vl %s" % data[0]
                        rc, lscfg_out, err = module.run_command(cmd)
                        # example output
                        # lscfg -vpl fcs3 | grep "Network Address"
                        # Network Address.............10000090FA551509
                        for line in lscfg_out.splitlines():
                            if 'Network Address' in line:
                                # Value follows the dotted leader; take the
                                # last '.'-separated token.
                                data = line.split('.')
                                fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
        return fc_facts
| gpl-3.0 |
wyom/sympy | sympy/polys/agca/tests/test_homomorphisms.py | 121 | 4182 | """Tests for homomorphisms."""
from sympy import QQ, S
from sympy.abc import x, y
from sympy.polys.agca import homomorphism
from sympy.utilities.pytest import raises
def test_printing():
    """str() of homomorphisms shows the matrix and the domain/codomain."""
    R = QQ.old_poly_ring(x)
    assert str(homomorphism(R.free_module(1), R.free_module(1), [0])) == \
        'Matrix([[0]]) : QQ[x]**1 -> QQ[x]**1'
    assert str(homomorphism(R.free_module(2), R.free_module(2), [0, 0])) == \
        'Matrix([                       \n[0, 0], : QQ[x]**2 -> QQ[x]**2\n[0, 0]])                       '
    assert str(homomorphism(R.free_module(1), R.free_module(1) / [[x]], [0])) == \
        'Matrix([[0]]) : QQ[x]**1 -> QQ[x]**1/<[x]>'
    # Degenerate case: the zero-rank module maps via an empty matrix.
    assert str(R.free_module(0).identity_hom()) == 'Matrix(0, 0, []) : QQ[x]**0 -> QQ[x]**0'
def test_operations():
    """Arithmetic, composition, application and restriction of homomorphisms."""
    F = QQ.old_poly_ring(x).free_module(2)
    G = QQ.old_poly_ring(x).free_module(3)
    f = F.identity_hom()
    g = homomorphism(F, F, [0, [1, x]])
    h = homomorphism(F, F, [[1, 0], 0])
    i = homomorphism(F, G, [[1, 0, 0], [0, 1, 0]])
    # Equality compares matrix, domain and codomain.
    assert f == f
    assert f != g
    assert f != i
    assert (f != F.identity_hom()) is False
    # Scalar multiplication and division.
    assert 2*f == f*2 == homomorphism(F, F, [[2, 0], [0, 2]])
    assert f/2 == homomorphism(F, F, [[S(1)/2, 0], [0, S(1)/2]])
    # Pointwise addition/subtraction.
    assert f + g == homomorphism(F, F, [[1, 0], [1, x + 1]])
    assert f - g == homomorphism(F, F, [[1, 0], [-1, 1 - x]])
    # Composition (identity is neutral; h and g do not commute).
    assert f*g == g == g*f
    assert h*g == homomorphism(F, F, [0, [1, 0]])
    assert g*h == homomorphism(F, F, [0, 0])
    assert i*f == i
    # Application to module elements.
    assert f([1, 2]) == [1, 2]
    assert g([1, 2]) == [2, 2*x]
    assert i.restrict_domain(F.submodule([x, x]))([x, x]) == i([x, x])
    h1 = h.quotient_domain(F.submodule([0, 1]))
    assert h1([1, 0]) == h([1, 0])
    assert h1.restrict_domain(h1.domain.submodule([x, 0]))([x, 0]) == h([x, 0])
    # Ill-typed operations must raise.
    raises(TypeError, lambda: f/g)
    raises(TypeError, lambda: f + 1)
    raises(TypeError, lambda: f + i)
    raises(TypeError, lambda: f - 1)
    raises(TypeError, lambda: f*i)
def test_creation():
    """Construction of homomorphisms and the canonical hom factories."""
    F = QQ.old_poly_ring(x).free_module(3)
    G = QQ.old_poly_ring(x).free_module(2)
    SM = F.submodule([1, 1, 1])
    Q = F / SM
    SQ = Q.submodule([1, 0, 0])
    matrix = [[1, 0], [0, 1], [-1, -1]]
    h = homomorphism(F, G, matrix)
    h2 = homomorphism(Q, G, matrix)
    # h kills SM, so it factors through the quotient.
    assert h.quotient_domain(SM) == h2
    raises(ValueError, lambda: h.quotient_domain(F.submodule([1, 0, 0])))
    assert h2.restrict_domain(SQ) == homomorphism(SQ, G, matrix)
    # Restriction/quotient with incompatible modules must raise.
    raises(ValueError, lambda: h.restrict_domain(G))
    raises(ValueError, lambda: h.restrict_codomain(G.submodule([1, 0])))
    raises(ValueError, lambda: h.quotient_codomain(F))
    im = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    # Canonical homomorphisms agree with explicitly-built identity matrices.
    for M in [F, SM, Q, SQ]:
        assert M.identity_hom() == homomorphism(M, M, im)
    assert SM.inclusion_hom() == homomorphism(SM, F, im)
    assert SQ.inclusion_hom() == homomorphism(SQ, Q, im)
    assert Q.quotient_hom() == homomorphism(F, Q, im)
    assert SQ.quotient_hom() == homomorphism(SQ.base, SQ, im)
    # Objects that merely look like modules must be rejected.
    class conv(object):
        def convert(x, y=None):
            return x
    class dummy(object):
        container = conv()
        def submodule(*args):
            return None
    raises(TypeError, lambda: homomorphism(dummy(), G, matrix))
    raises(TypeError, lambda: homomorphism(F, dummy(), matrix))
    # Mismatched ground rings and wrong matrix shape must be rejected too.
    raises(
        ValueError, lambda: homomorphism(QQ.old_poly_ring(x, y).free_module(3), G, matrix))
    raises(ValueError, lambda: homomorphism(F, G, [0, 0]))
def test_properties():
    """kernel/image and the injective/surjective/isomorphism predicates."""
    R = QQ.old_poly_ring(x, y)
    F = R.free_module(2)
    h = homomorphism(F, F, [[x, 0], [y, 0]])
    assert h.kernel() == F.submodule([-y, x])
    assert h.image() == F.submodule([x, 0], [y, 0])
    assert not h.is_injective()
    assert not h.is_surjective()
    assert h.restrict_codomain(h.image()).is_surjective()
    assert h.restrict_domain(F.submodule([1, 0])).is_injective()
    # First isomorphism theorem: domain/kernel ~ image.
    assert h.quotient_domain(
        h.kernel()).restrict_codomain(h.image()).is_isomorphism()
    # Same checks over a quotient ring with a mixed monomial order.
    R2 = QQ.old_poly_ring(x, y, order=(("lex", x), ("ilex", y))) / [x**2 + 1]
    F = R2.free_module(2)
    h = homomorphism(F, F, [[x, 0], [y, y + 1]])
    assert h.is_isomorphism()
| bsd-3-clause |
azureplus/hue | desktop/core/ext-py/Paste-2.0.1/paste/debug/wdg_validate.py | 50 | 4268 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Middleware that tests the validity of all generated HTML using the
`WDG HTML Validator <http://www.htmlhelp.com/tools/validator/>`_
"""
from cStringIO import StringIO
import subprocess
from paste.response import header_value
import re
import cgi
__all__ = ['WDGValidateMiddleware']
class WDGValidateMiddleware(object):
    """
    Middleware that checks HTML and appends messages about the validity of
    the HTML.  Uses: http://www.htmlhelp.com/tools/validator/ -- interacts
    with the command line client.  Use the configuration ``wdg_path`` to
    override the path (default: looks for ``validate`` in $PATH).

    To install, in your web context's __init__.py::

        def urlparser_wrap(environ, start_response, app):
            return wdg_validate.WDGValidateMiddleware(app)(
                environ, start_response)

    Or in your configuration::

        middleware.append('paste.wdg_validate.WDGValidateMiddleware')
    """
    # Used to find the insertion point for the validation report.
    _end_body_regex = re.compile(r'</body>', re.I)
    def __init__(self, app, global_conf=None, wdg_path='validate'):
        # app: the wrapped WSGI application; wdg_path: the validator binary.
        self.app = app
        self.wdg_path = wdg_path
    def __call__(self, environ, start_response):
        # WSGI entry point: buffer the downstream response, validate it,
        # and return it (possibly with a validation report appended).
        output = StringIO()
        response = []
        def writer_start_response(status, headers, exc_info=None):
            # Capture status/headers for inspection while still passing
            # them through to the real start_response.
            response.extend((status, headers))
            start_response(status, headers, exc_info)
            return output.write
        app_iter = self.app(environ, writer_start_response)
        try:
            # Materialize the whole body; the validator needs it complete.
            for s in app_iter:
                output.write(s)
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
        page = output.getvalue()
        status, headers = response
        v = header_value(headers, 'content-type') or ''
        if (not v.startswith('text/html')
            and not v.startswith('text/xhtml')
                and not v.startswith('application/xhtml')):
            # Can't validate
            # @@: Should validate CSS too... but using what?
            return [page]
        ops = []
        if v.startswith('text/xhtml+xml'):
            ops.append('--xml')
        # @@: Should capture encoding too
        html_errors = self.call_wdg_validate(
            self.wdg_path, ops, page)
        if html_errors:
            page = self.add_error(page, html_errors)[0]
            # NOTE(review): if the response carried no Content-Length,
            # this remove() would raise ValueError on the non-existent
            # ('Content-Length', 'None') pair -- presumably responses
            # here always have one; verify before reuse.
            headers.remove(
                ('Content-Length',
                 str(header_value(headers, 'content-length'))))
            headers.append(('Content-Length', str(len(page))))
        return [page]
    def call_wdg_validate(self, wdg_path, ops, page):
        # Pipe the page into the validator process; returns its combined
        # stdout/stderr output (empty means no errors reported).
        if subprocess is None:
            raise ValueError(
                "This middleware requires the subprocess module from "
                "Python 2.4")
        proc = subprocess.Popen([wdg_path] + ops,
                                shell=False,
                                close_fds=True,
                                stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        stdout = proc.communicate(page)[0]
        proc.wait()
        return stdout
    def add_error(self, html_page, html_errors):
        # Append the (HTML-escaped) validator report just before </body>,
        # or at the end of the document when no </body> is found.
        add_text = ('<pre style="background-color: #ffd; color: #600; '
                    'border: 1px solid #000;">%s</pre>'
                    % cgi.escape(html_errors))
        match = self._end_body_regex.search(html_page)
        if match:
            return [html_page[:match.start()]
                    + add_text
                    + html_page[match.start():]]
        else:
            return [html_page + add_text]
def make_wdg_validate_middleware(
        app, global_conf, wdg_path='validate'):
    """
    Paste entry-point factory: wraps the application in the WDG validator
    from http://www.htmlhelp.com/tools/validator/

    Validation errors are appended to the text of each page.

    You can configure this by giving the path to the validate
    executable (by default picked up from $PATH)
    """
    return WDGValidateMiddleware(
        app, global_conf, wdg_path=wdg_path)
| apache-2.0 |
mce35/agocontrol | devices/wifi370/agowifi370.py | 2 | 3556 | #!/usr/bin/env python
import socket
import agoclient
client = agoclient.AgoConnection("wifi370")
COMMAND_ON="\xcc\x23\x33"
COMMAND_OFF="\xcc\x24\x33"
# COMMAND_RGB="\x56\xRR\xGG\xBB\xaa"
COMMAND_STATUS="\xef\x01\x77"
try:
deviceconfig = agoclient.get_config_option("wifi370", "devices", "192.168.80.44:5577")
devices = map(str, deviceconfig.split(','))
except e:
devices = None
print "Error, no devices:" + e
else:
for device in devices:
client.add_device(device, "dimmerrgb")
def sendcmd(host, port, command):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(command)
s.close()
except socket.error as msg:
print "Socket error: ", msg
def getstatus(host, port):
    """Query a WiFi370 LED controller for its current state.

    Returns an (onoff, red, green, blue) tuple of ints in 0-255, or
    (implicitly) None on socket error or on a malformed reply -- callers
    handle the None case by catching TypeError when unpacking.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, port))
        s.send(COMMAND_STATUS)
        # Controller replies with a fixed 11-byte status frame.
        reply = s.recv(11)
        s.close()
        # Frame is valid when it starts with 0x66 and ends with 0x99.
        if ord(reply[0])==0x66 and ord(reply[10]) == 0x99:
            red = ord(reply[6])
            green = ord(reply[7])
            blue = ord(reply[8])
            # Byte 2 is the power flag: 0x23 = on (reported as 255).
            if ord(reply[2])==0x23:
                onoff=255
            else:
                onoff=0
            # mode = ord(reply[3])
            # startstop = ord(reply[4])
            # usermem = ord(reply[9])
            return (onoff, red, green, blue)
        else:
            print "ERROR: cannot get status from " + host + ":" + port
    except socket.error as msg:
        print "Socket error: ", msg
def messageHandler(internalid, content):
    """agocontrol command dispatcher for one controller.

    internalid is the "host:port" string the device was registered under;
    content carries the command ('on', 'off', 'setlevel', 'setcolor') and
    its arguments. After each command the real state is read back and
    emitted as an event.
    """
    host, _port = internalid.split(':')
    port = int(_port)
    if "command" in content:
        if content["command"] == "on":
            print "switching on: " + internalid
            sendcmd(host,port,COMMAND_ON)
            # getstatus() may return None; unpacking then raises TypeError.
            try:
                (onoff, red, green, blue) = getstatus(host, port)
                client.emit_event(internalid, "event.device.statechanged", str(onoff), "")
            except TypeError:
                print "ERROR: Can't read status"
        if content["command"] == "off":
            print "switching off: " + internalid
            sendcmd(host,port,COMMAND_OFF)
            try:
                (onoff, red, green, blue) = getstatus(host, port)
                client.emit_event(internalid, "event.device.statechanged", str(onoff), "")
            except TypeError:
                print "ERROR: Can't read status"
        if content["command"] == "setlevel":
            level = content["level"]
            print "setting level:", internalid, level
            # Scale percentage (0-100) to the controller's 0-255 range and
            # apply it equally to all three channels (white dimming).
            value = int(level) * 255 / 100
            command = "\x56" + chr(value) + chr(value) + chr(value) + "\xaa"
            sendcmd(host,port,COMMAND_ON)
            sendcmd(host,port,command)
            try:
                (onoff, red, green, blue) = getstatus(host, port)
                if onoff == 0:
                    client.emit_event(internalid, "event.device.statechanged", str(onoff), "")
                else:
                    # Report the average channel brightness back as percent.
                    client.emit_event(internalid, "event.device.statechanged", str((red + green + blue)*100/3/255), "")
            except TypeError:
                print "ERROR: Can't read status"
            client.emit_event(internalid, "event.device.statechanged", level, "")
        if content["command"] == "setcolor":
            # Each channel arrives as percent; scale to 0-255.
            red = int(content["red"]) * 255 / 100
            green = int(content["green"]) * 255 / 100
            blue = int(content["blue"]) * 255 / 100
            command = "\x56" + chr(red) + chr(green) + chr(blue) + "\xaa"
            sendcmd(host,port,COMMAND_ON)
            sendcmd(host,port,command)
            try:
                (onoff, red, green, blue) = getstatus(host, port)
                if onoff == 0:
                    client.emit_event(internalid, "event.device.statechanged", str(onoff), "")
                else:
                    client.emit_event(internalid, "event.device.statechanged", str((red + green + blue)*100/3/255), "")
                #client.emit_event(internalid, "event.device.statechanged", str(red) + "/" + str(green) + "/" + str(blue), "")
            except TypeError:
                print "ERROR: Can't read status"
# Register the dispatcher and enter the agocontrol message loop
# (client.run() blocks for the lifetime of the process).
client.add_handler(messageHandler)
print "Waiting for messages"
client.run()
| gpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/oauth2client/tests/test_gce.py | 17 | 3904 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for oauth2client.gce.
Unit tests for oauth2client.gce.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import unittest
import httplib2
import mock
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import Credentials
from oauth2client.client import save_to_well_known_file
from oauth2client.gce import AppAssertionCredentials
class AssertionCredentialsTests(unittest.TestCase):
def test_good_refresh(self):
http = mock.MagicMock()
http.request = mock.MagicMock(
return_value=(mock.Mock(status=200),
'{"accessToken": "this-is-a-token"}'))
c = AppAssertionCredentials(scope=['http://example.com/a',
'http://example.com/b'])
self.assertEquals(None, c.access_token)
c.refresh(http)
self.assertEquals('this-is-a-token', c.access_token)
http.request.assert_called_once_with(
'http://metadata.google.internal/0.1/meta-data/service-accounts/'
'default/acquire'
'?scope=http%3A%2F%2Fexample.com%2Fa%20http%3A%2F%2Fexample.com%2Fb')
def test_fail_refresh(self):
http = mock.MagicMock()
http.request = mock.MagicMock(return_value=(mock.Mock(status=400), '{}'))
c = AppAssertionCredentials(scope=['http://example.com/a',
'http://example.com/b'])
self.assertRaises(AccessTokenRefreshError, c.refresh, http)
def test_to_from_json(self):
c = AppAssertionCredentials(scope=['http://example.com/a',
'http://example.com/b'])
json = c.to_json()
c2 = Credentials.new_from_json(json)
self.assertEqual(c.access_token, c2.access_token)
def test_create_scoped_required_without_scopes(self):
credentials = AppAssertionCredentials([])
self.assertTrue(credentials.create_scoped_required())
def test_create_scoped_required_with_scopes(self):
credentials = AppAssertionCredentials(['dummy_scope'])
self.assertFalse(credentials.create_scoped_required())
def test_create_scoped(self):
credentials = AppAssertionCredentials([])
new_credentials = credentials.create_scoped(['dummy_scope'])
self.assertNotEqual(credentials, new_credentials)
self.assertTrue(isinstance(new_credentials, AppAssertionCredentials))
self.assertEqual('dummy_scope', new_credentials.scope)
def test_get_access_token(self):
http = mock.MagicMock()
http.request = mock.MagicMock(
return_value=(mock.Mock(status=200),
'{"accessToken": "this-is-a-token"}'))
credentials = AppAssertionCredentials(['dummy_scope'])
token = credentials.get_access_token(http=http)
self.assertEqual('this-is-a-token', token.access_token)
self.assertEqual(None, token.expires_in)
http.request.assert_called_once_with(
'http://metadata.google.internal/0.1/meta-data/service-accounts/'
'default/acquire?scope=dummy_scope')
def test_save_to_well_known_file(self):
    """save_to_well_known_file is unsupported for AppAssertionCredentials.

    The directory-existence check is stubbed out so the test exercises the
    NotImplementedError path regardless of the local filesystem. Using
    mock.patch.object (the file already uses mock) instead of a manual
    try/finally monkeypatch guarantees os.path.isdir is restored even if
    the assertion machinery itself raises.
    """
    import os
    credentials = AppAssertionCredentials([])
    # Pretend the well-known config directory exists so the call gets far
    # enough to hit the NotImplementedError raised for this credential type.
    with mock.patch.object(os.path, 'isdir', return_value=True):
        self.assertRaises(NotImplementedError, save_to_well_known_file,
                          credentials)
| mit |
GaetanCambier/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youku.py | 36 | 3928 | # coding: utf-8
from __future__ import unicode_literals
import math
import random
import re
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YoukuIE(InfoExtractor):
    """Extractor for videos hosted on Youku (v.youku.com).

    Youku obfuscates its real segment file ids: the playlist API returns a
    '*'-separated list of indices plus a numeric ``seed``, and the client
    must rebuild the id by indexing into a pseudo-randomly shuffled
    alphabet derived from that seed (_get_file_ID_mix_string /
    _get_file_id below mirror the Flash player's algorithm).
    """
    _VALID_URL = r'''(?x)
        (?:
            http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
            youku:)
        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
    '''
    _TEST = {
        'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html',
        'md5': 'ffe3f2e435663dc2d1eea34faeff5b5b',
        'params': {
            'test': False
        },
        'info_dict': {
            'id': 'XNDgyMDQ2NTQw_part00',
            'ext': 'flv',
            'title': 'youtube-dl test video "\'/\\ä↭𝕐'
        }
    }

    def _gen_sid(self):
        # Session id: current time in milliseconds followed by two random
        # number blocks.
        nowTime = int(time.time() * 1000)
        random1 = random.randint(1000, 1998)
        random2 = random.randint(1000, 9999)
        return "%d%d%d" % (nowTime, random1, random2)

    def _get_file_ID_mix_string(self, seed):
        # Deterministically shuffle the id alphabet with a linear
        # congruential generator seeded by the server-supplied value.
        # Returns the shuffled alphabet as a list of characters.
        mixed = []
        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
        seed = float(seed)
        for i in range(len(source)):
            seed = (seed * 211 + 30031) % 65536
            index = math.floor(seed / 65536 * len(source))
            mixed.append(source[int(index)])
            source.remove(source[int(index)])
        # return ''.join(mixed)
        return mixed

    def _get_file_id(self, fileId, seed):
        # Translate the '*'-separated index string into characters of the
        # seed-shuffled alphabet, recovering the real file id.
        mixed = self._get_file_ID_mix_string(seed)
        ids = fileId.split('*')
        realId = []
        for ch in ids:
            if ch:
                realId.append(mixed[int(ch)])
        return ''.join(realId)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id

        config = self._download_json(info_url, video_id)

        error_code = config['data'][0].get('error_code')
        if error_code:
            # -8 means blocked outside China.
            error = config['data'][0].get('error')  # Chinese and English, separated by newline.
            raise ExtractorError(error or 'Server reported error %i' % error_code,
                                 expected=True)

        video_title = config['data'][0]['title']
        seed = config['data'][0]['seed']

        format = self._downloader.params.get('format', None)
        supported_format = list(config['data'][0]['streamfileids'].keys())

        # TODO proper format selection
        if format is None or format == 'best':
            if 'hd2' in supported_format:
                format = 'hd2'
            else:
                format = 'flv'
            ext = 'flv'
        elif format == 'worst':
            format = 'mp4'
            ext = 'mp4'
        else:
            format = 'flv'
            ext = 'flv'

        fileid = config['data'][0]['streamfileids'][format]
        keys = [s['k'] for s in config['data'][0]['segs'][format]]
        # segs is usually a dictionary, but an empty *list* if an error occurred.

        # One downloadable entry per video segment; each segment needs its
        # own per-segment key from 'segs'.
        files_info = []
        sid = self._gen_sid()
        fileid = self._get_file_id(fileid, seed)

        # column 8,9 of fileid represent the segment number
        # fileid[7:9] should be changed
        for index, key in enumerate(keys):
            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
            download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)

            info = {
                'id': '%s_part%02d' % (video_id, index),
                'url': download_url,
                'uploader': None,
                'upload_date': None,
                'title': video_title,
                'ext': ext,
            }
            files_info.append(info)

        return files_info
| gpl-3.0 |
vladmm/intellij-community | plugins/hg4idea/testData/bin/mercurial/httppeer.py | 93 | 9303 | # httppeer.py - HTTP repository proxy classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullid
from i18n import _
import changegroup, statichttprepo, error, httpconnection, url, util, wireproto
import os, urllib, urllib2, zlib, httplib
import errno, socket
def zgenerator(f):
    """Yield the zlib-decompressed contents of the file-like object *f*."""
    decompressor = zlib.decompressobj()
    try:
        for block in util.filechunkiter(f):
            # Decompress in bounded slices so a huge chunk cannot blow up
            # memory; unconsumed_tail carries the remainder forward.
            while block:
                yield decompressor.decompress(block, 2 ** 18)
                block = decompressor.unconsumed_tail
    except httplib.HTTPException:
        raise IOError(None, _('connection ended unexpectedly'))
    yield decompressor.flush()
class httppeer(wireproto.wirepeer):
    """Client-side peer speaking the Mercurial wire protocol over HTTP.

    Wire commands are encoded as query parameters on requests to the
    repository URL; when the server advertises the 'httpheader'
    capability, long argument lists are moved into ``X-HgArg-<n>``
    request headers instead. Responses are expected to use the
    ``application/mercurial-0.1`` content type.
    """

    def __init__(self, ui, path):
        self.path = path
        self.caps = None        # capability set, fetched lazily (_capabilities)
        self.handler = None
        self.urlopener = None
        u = util.url(path)
        if u.query or u.fragment:
            raise util.Abort(_('unsupported URL component: "%s"') %
                             (u.query or u.fragment))

        # urllib cannot handle URLs with embedded user or passwd
        self._url, authinfo = u.authinfo()

        self.ui = ui
        self.ui.debug('using %s\n' % self._url)

        self.urlopener = url.opener(ui, authinfo)

    def __del__(self):
        # Make sure (keepalive) connections do not outlive the peer.
        if self.urlopener:
            for h in self.urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda : None)()

    def url(self):
        return self.path

    # look up capabilities only when needed

    def _fetchcaps(self):
        self.caps = set(self._call('capabilities').split())

    def _capabilities(self):
        if self.caps is None:
            try:
                self._fetchcaps()
            except error.RepoError:
                # Server without the capabilities command: assume none.
                self.caps = set()
            self.ui.debug('capabilities: %s\n' %
                          (' '.join(self.caps or ['none'])))
        return self.caps

    def lock(self):
        raise util.Abort(_('operation not supported over http'))

    def _callstream(self, cmd, **args):
        """Issue wire command *cmd* and return the raw response object.

        Special keyword arguments: 'data' (request body, string or
        file-like with a ``length`` attribute) and 'headers' (extra HTTP
        headers); everything else is sent as command arguments. Raises
        util.Abort on authorization failure and error.RepoError when the
        response does not look like a Mercurial repository.
        """
        if cmd == 'pushkey':
            args['data'] = ''
        data = args.pop('data', None)
        size = 0
        if util.safehasattr(data, 'length'):
            size = data.length
        elif data is not None:
            size = len(data)
        headers = args.pop('headers', {})

        if data is not None and 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/mercurial-0.1'

        if size and self.ui.configbool('ui', 'usehttp2', False):
            headers['Expect'] = '100-Continue'
            headers['X-HgHttp2'] = '1'

        self.ui.debug("sending %s command\n" % cmd)
        q = [('cmd', cmd)]
        headersize = 0
        if len(args) > 0:
            httpheader = self.capable('httpheader')
            if httpheader:
                # First comma-separated field is the per-header byte budget.
                headersize = int(httpheader.split(',')[0])
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encargs = urllib.urlencode(sorted(args.items()))
            headerfmt = 'X-HgArg-%s'
            contentlen = headersize - len(headerfmt % '000' + ': \r\n')
            headernum = 0
            for i in xrange(0, len(encargs), contentlen):
                headernum += 1
                header = headerfmt % str(headernum)
                headers[header] = encargs[i:i + contentlen]
            varyheaders = [headerfmt % str(h) for h in range(1, headernum + 1)]
            headers['Vary'] = ','.join(varyheaders)
        else:
            q += sorted(args.items())
        qs = '?%s' % urllib.urlencode(q)
        cu = "%s%s" % (self._url, qs)
        req = urllib2.Request(cu, data, headers)
        if data is not None:
            self.ui.debug("sending %s bytes\n" % size)
            req.add_unredirected_header('Content-Length', '%d' % size)
        try:
            resp = self.urlopener.open(req)
        except urllib2.HTTPError, inst:
            if inst.code == 401:
                raise util.Abort(_('authorization failed'))
            raise
        except httplib.HTTPException, inst:
            self.ui.debug('http error while sending %s command\n' % cmd)
            self.ui.traceback()
            raise IOError(None, inst)
        except IndexError:
            # this only happens with Python 2.3, later versions raise URLError
            raise util.Abort(_('http error, possibly caused by proxy setting'))
        # record the url we got redirected to
        resp_url = resp.geturl()
        if resp_url.endswith(qs):
            resp_url = resp_url[:-len(qs)]
        if self._url.rstrip('/') != resp_url.rstrip('/'):
            if not self.ui.quiet:
                self.ui.warn(_('real URL is %s\n') % resp_url)
            self._url = resp_url
        try:
            proto = resp.getheader('content-type')
        except AttributeError:
            proto = resp.headers.get('content-type', '')

        safeurl = util.hidepassword(self._url)
        if proto.startswith('application/hg-error'):
            raise error.OutOfBandError(resp.read())
        # accept old "text/plain" and "application/hg-changegroup" for now
        if not (proto.startswith('application/mercurial-') or
                (proto.startswith('text/plain')
                 and not resp.headers.get('content-length')) or
                proto.startswith('application/hg-changegroup')):
            self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
            raise error.RepoError(
                _("'%s' does not appear to be an hg repository:\n"
                  "---%%<--- (%s)\n%s\n---%%<---\n")
                % (safeurl, proto or 'no content-type', resp.read(1024)))

        if proto.startswith('application/mercurial-'):
            try:
                version = proto.split('-', 1)[1]
                version_info = tuple([int(n) for n in version.split('.')])
            except ValueError:
                raise error.RepoError(_("'%s' sent a broken Content-Type "
                                        "header (%s)") % (safeurl, proto))
            # Only protocol 0.1 is understood by this client.
            if version_info > (0, 1):
                raise error.RepoError(_("'%s' uses newer protocol %s") %
                                      (safeurl, version))

        return resp

    def _call(self, cmd, **args):
        """Run *cmd* and return the entire response body as a string."""
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        """Push changegroup *cg* via *cmd*; return the two-part server reply."""
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable('unbundle')
        try:
            types = types.split(',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [""]
        for x in types:
            if x in changegroup.bundletypes:
                type = x
                break

        tempname = changegroup.writebundle(cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
        headers = {'Content-Type': 'application/mercurial-0.1'}

        try:
            try:
                r = self._call(cmd, data=fp, headers=headers, **args)
                vals = r.split('\n', 1)
                if len(vals) < 2:
                    raise error.ResponseError(_("unexpected response:"), r)
                return vals
            except socket.error, err:
                if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                    raise util.Abort(_('push failed: %s') % err.args[1])
                raise util.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _abort(self, exception):
        raise exception

    def _decompress(self, stream):
        # Server responses are zlib-compressed; wrap them in a lazy
        # decompressing chunk buffer.
        return util.chunkbuffer(zgenerator(stream))
class httpspeer(httppeer):
    """httppeer variant for https:// URLs; refuses to start without SSL support."""

    def __init__(self, ui, path):
        if url.has_https:
            httppeer.__init__(self, ui, path)
        else:
            raise util.Abort(_('Python support for SSL and HTTPS '
                               'is not installed'))
def instance(ui, path, create):
    """Peer factory: build an http(s) peer for *path*.

    Creating repositories over http is not supported. If the remote does
    not speak the smart protocol, fall back to a read-only static-http
    repository; when even that fails, re-raise the original error.
    """
    if create:
        raise util.Abort(_('cannot create new http repository'))
    try:
        if path.startswith('https:'):
            inst = httpspeer(ui, path)
        else:
            inst = httppeer(ui, path)
        try:
            # Try to do useful work when checking compatibility.
            # Usually saves a roundtrip since we want the caps anyway.
            inst._fetchcaps()
        except error.RepoError:
            # No luck, try older compatibility check.
            inst.between([(nullid, nullid)])
        return inst
    except error.RepoError, httpexception:
        try:
            r = statichttprepo.instance(ui, "static-" + path, create)
            ui.note('(falling back to static-http)\n')
            return r
        except error.RepoError:
            raise httpexception # use the original http RepoError instead
| apache-2.0 |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/sqlite3/test/transactions.py | 93 | 7338 | #-*- coding: iso-8859-1 -*-
# pysqlite2/test/transactions.py: tests transactions
#
# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import os, unittest
import sqlite3 as sqlite
def get_db_path():
    """Filename of the throwaway on-disk database shared by these tests."""
    return "sqlite_testdb"
class TransactionTests(unittest.TestCase):
    """Checks pysqlite's implicit transaction handling.

    Two independent connections (con1/con2) are opened on the same on-disk
    database. Whether con2 can see con1's changes reveals whether a given
    statement on con1 committed implicitly or merely opened a transaction.
    The short 0.1s busy timeout keeps the lock-contention tests fast.
    """

    def setUp(self):
        # Start every test from a fresh database file.
        try:
            os.remove(get_db_path())
        except OSError:
            pass

        self.con1 = sqlite.connect(get_db_path(), timeout=0.1)
        self.cur1 = self.con1.cursor()

        self.con2 = sqlite.connect(get_db_path(), timeout=0.1)
        self.cur2 = self.con2.cursor()

    def tearDown(self):
        self.cur1.close()
        self.con1.close()

        self.cur2.close()
        self.con2.close()

        try:
            os.unlink(get_db_path())
        except OSError:
            pass

    def CheckDMLdoesAutoCommitBefore(self):
        # The CREATE TABLE following the INSERT must implicitly commit the
        # pending DML, so the second connection sees the inserted row.
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.cur1.execute("create table test2(j)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

    def CheckInsertStartsTransaction(self):
        # An uncommitted INSERT opens a transaction: invisible to con2.
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 0)

    def CheckUpdateStartsTransaction(self):
        # An uncommitted UPDATE is invisible to con2 (still sees 5).
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.commit()
        self.cur1.execute("update test set i=6")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchone()[0]
        self.assertEqual(res, 5)

    def CheckDeleteStartsTransaction(self):
        # An uncommitted DELETE is invisible to con2 (row still there).
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.commit()
        self.cur1.execute("delete from test")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

    def CheckReplaceStartsTransaction(self):
        # An uncommitted REPLACE is invisible to con2 (old value 5 remains).
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.commit()
        self.cur1.execute("replace into test(i) values (6)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0][0], 5)

    def CheckToggleAutoCommit(self):
        # isolation_level None means autocommit: the insert is visible to
        # con2 immediately. Switching back to "DEFERRED" makes the next
        # insert transactional again, so con2 still sees only one row.
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.isolation_level = None
        self.assertEqual(self.con1.isolation_level, None)
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

        self.con1.isolation_level = "DEFERRED"
        self.assertEqual(self.con1.isolation_level, "DEFERRED")
        self.cur1.execute("insert into test(i) values (5)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

    def CheckRaiseTimeout(self):
        # con1 holds the write lock via its uncommitted insert; con2's
        # insert must give up after the 0.1s timeout with OperationalError.
        if sqlite.sqlite_version_info < (3, 2, 2):
            # This will fail (hang) on earlier versions of sqlite.
            # Determine exact version it was fixed. 3.2.1 hangs.
            return
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        try:
            self.cur2.execute("insert into test(i) values (5)")
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError:
            pass
        except:
            self.fail("should have raised an OperationalError")

    def CheckLocking(self):
        """
        This tests the improved concurrency with pysqlite 2.3.4. You needed
        to roll back con2 before you could commit con1.
        """
        if sqlite.sqlite_version_info < (3, 2, 2):
            # This will fail (hang) on earlier versions of sqlite.
            # Determine exact version it was fixed. 3.2.1 hangs.
            return
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        try:
            self.cur2.execute("insert into test(i) values (5)")
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError:
            pass
        except:
            self.fail("should have raised an OperationalError")
        # NO self.con2.rollback() HERE!!!
        self.con1.commit()

    def CheckRollbackCursorConsistency(self):
        """
        Checks if cursors on the connection are set into a "reset" state
        when a rollback is done on the connection.
        """
        con = sqlite.connect(":memory:")
        cur = con.cursor()
        cur.execute("create table test(x)")
        cur.execute("insert into test(x) values (5)")
        cur.execute("select 1 union select 2 union select 3")

        con.rollback()
        try:
            # The partially consumed result set must be invalidated.
            cur.fetchall()
            self.fail("InterfaceError should have been raised")
        except sqlite.InterfaceError as e:
            pass
        except:
            self.fail("InterfaceError should have been raised")
class SpecialCommandTests(unittest.TestCase):
    """Checks statements that receive special treatment (VACUUM, DROP, PRAGMA)."""

    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.cur = self.con.cursor()

    def tearDown(self):
        self.cur.close()
        self.con.close()

    def _make_populated_table(self):
        # Shared fixture: a one-column table holding a single row.
        self.cur.execute("create table test(i)")
        self.cur.execute("insert into test(i) values (5)")

    def CheckVacuum(self):
        self._make_populated_table()
        self.cur.execute("vacuum")

    def CheckDropTable(self):
        self._make_populated_table()
        self.cur.execute("drop table test")

    def CheckPragma(self):
        self._make_populated_table()
        self.cur.execute("pragma count_changes=1")
def suite():
    """Build the test suite; only methods prefixed with 'Check' are collected.

    unittest.makeSuite is deprecated and removed in Python 3.13, so use an
    explicit TestLoader with a custom method prefix instead; the loader's
    default name sorting matches what makeSuite produced.
    """
    loader = unittest.TestLoader()
    loader.testMethodPrefix = "Check"
    default_suite = loader.loadTestsFromTestCase(TransactionTests)
    special_command_suite = loader.loadTestsFromTestCase(SpecialCommandTests)
    return unittest.TestSuite((default_suite, special_command_suite))
def test():
    """Run the whole transaction suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())
if __name__ == "__main__":
test()
| lgpl-3.0 |
Nikoli/youtube-dl | youtube_dl/extractor/mgoon.py | 177 | 2695 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
qualities,
unified_strdate,
)
class MgoonIE(InfoExtractor):
    """Extractor for videos hosted on mgoon.com (mobile, web, and video.* URLs).

    All metadata comes from a single JSON API call; the API lists one file
    per quality label in 'videoFiles'.
    """
    # NOTE(review): '(:?m\.)' below looks like a typo for the non-capturing
    # group '(?:m\.)'. As written it optionally matches a literal ':' before
    # 'm.', which never occurs in a hostname, so behavior is unaffected —
    # but it is worth fixing upstream.
    _VALID_URL = r'''(?x)https?://(?:www\.)?
    (?:(:?m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
        video\.mgoon\.com)/(?P<id>[0-9]+)'''
    _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}'
    _TESTS = [
        {
            'url': 'http://m.mgoon.com/ch/hi6618/v/5582148',
            'md5': 'dd46bb66ab35cf6d51cc812fd82da79d',
            'info_dict': {
                'id': '5582148',
                'uploader_id': 'hi6618',
                'duration': 240.419,
                'upload_date': '20131220',
                'ext': 'mp4',
                'title': 'md5:543aa4c27a4931d371c3f433e8cebebc',
                'thumbnail': 're:^https?://.*\.jpg$',
            }
        },
        {
            'url': 'http://www.mgoon.com/play/view/5582148',
            'only_matching': True,
        },
        {
            'url': 'http://video.mgoon.com/5582148',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        data = self._download_json(self._API_URL.format(video_id), video_id)

        if data.get('errorInfo', {}).get('code') != 'NONE':
            raise ExtractorError('%s encountered an error: %s' % (
                self.IE_NAME, data['errorInfo']['message']), expected=True)

        v_info = data['videoInfo']
        title = v_info.get('v_title')
        thumbnail = v_info.get('v_thumbnail')
        duration = v_info.get('v_duration')
        upload_date = unified_strdate(v_info.get('v_reg_date'))
        uploader_id = data.get('userInfo', {}).get('u_alias')
        if duration:
            # API reports milliseconds; the test fixture (240.419) shows the
            # info dict carries seconds.
            duration /= 1000.0
        age_limit = None
        if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT':
            age_limit = 18

        formats = []
        get_quality = qualities(['360p', '480p', '720p', '1080p'])
        for fmt in data['videoFiles']:
            formats.append({
                'format_id': fmt['label'],
                'quality': get_quality(fmt['label']),
                'url': fmt['url'],
                'ext': fmt['format'],
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'age_limit': age_limit,
        }
| unlicense |
liangwang/m5 | src/mem/slicc/ast/ExprStatementAST.py | 33 | 2360 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
from slicc.symbols import Type
class ExprStatementAST(StatementAST):
    """AST node for an expression used as a statement (its value is discarded)."""

    def __init__(self, slicc, expr):
        super(ExprStatementAST, self).__init__(slicc)
        self.expr = expr

    def __repr__(self):
        return "[ExprStatementAST: %s]" % (self.expr,)

    def generate(self, code, return_type):
        # Inline the expression and emit it followed by a semicolon.
        actual_type, rcode = self.expr.inline(True)
        code("$rcode;")

        # An expression statement throws its value away, so any non-void
        # result is an ignored return value and must be rejected.
        void_type = self.symtab.find("void", Type)
        if actual_type != void_type:
            self.expr.error("Non-void return must not be ignored, "
                            "return type is '%s'", actual_type.ident)

    def findResources(self, resources):
        self.expr.findResources(resources)
| bsd-3-clause |
tvibliani/odoo | addons/base_action_rule/__openerp__.py | 260 | 1896 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Action Rules',
'version': '1.0',
'category': 'Sales Management',
'description': """
This module allows to implement action rules for any object.
============================================================
Use automated actions to automatically trigger actions for various screens.
**Example:** A lead created by a specific user may be automatically set to a specific
sales team, or an opportunity which still has status pending after 14 days might
trigger an automatic reminder email.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base', 'resource', 'mail'],
'data': [
'base_action_rule_data.xml',
'base_action_rule_view.xml',
'security/ir.model.access.csv',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mguzdial3/MineCode | python-codec/src/tests/test_message_agent.py | 2 | 1608 | #
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 446 $
# $Date: 2009-01-22 20:20:21 -0700 (Thu, 22 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/tests/test_message_agent.py $
import random
import sys
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
from rlglue.types import Observation
class test_message_agent(Agent):
    """Minimal RL-Glue agent used to exercise the message round trip.

    Every message is echoed back, except that the four sentinel values
    None, '', 'null' and 'empty' are swapped with each other so the test
    harness can tell them apart on the wire.
    """

    whichEpisode = 0

    def agent_init(self, taskSpec):
        pass

    def agent_start(self, observation):
        return Action()

    def agent_step(self, reward, observation):
        return Action()

    def agent_end(self, reward):
        pass

    def agent_cleanup(self):
        pass

    def agent_message(self, inMessage):
        # The four sentinels are mutually exclusive, so check order
        # does not matter; anything else is echoed unchanged.
        if inMessage is None:
            return "null"
        if inMessage == "null":
            return None
        if inMessage == "empty":
            return ""
        if inMessage == "":
            return "empty"
        return inMessage
if __name__=="__main__":
AgentLoader.loadAgent(test_message_agent()) | apache-2.0 |
fevxie/odoo | addons/base_action_rule/__openerp__.py | 260 | 1896 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Action Rules',
'version': '1.0',
'category': 'Sales Management',
'description': """
This module allows to implement action rules for any object.
============================================================
Use automated actions to automatically trigger actions for various screens.
**Example:** A lead created by a specific user may be automatically set to a specific
sales team, or an opportunity which still has status pending after 14 days might
trigger an automatic reminder email.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base', 'resource', 'mail'],
'data': [
'base_action_rule_data.xml',
'base_action_rule_view.xml',
'security/ir.model.access.csv',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sallaire/Sick-Beard | sickbeard/traktWatchListChecker.py | 30 | 6994 | # Author: Frank Fenton
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import time
import os
import sickbeard
from sickbeard import encodingKludge as ek
from sickbeard import logger,db
from sickbeard import helpers
from sickbeard import search_queue
from sickbeard.common import SNATCHED, SNATCHED_PROPER, SNATCHED_FRENCH, DOWNLOADED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED, UNKNOWN
from lib.trakt import *
class TraktChecker():
def __init__(self):
self.todoWanted = []
self.todoBacklog = []
def run(self):
if sickbeard.TRAKT_USE_WATCHLIST:
self.todoWanted = [] #its about to all get re-added
if len(sickbeard.ROOT_DIRS.split('|')) < 2:
logger.log(u"No default root directory", logger.ERROR)
return
self.updateShows()
self.updateEpisodes()
def updateShows(self):
logger.log(u"Starting trakt show watchlist check", logger.DEBUG)
watchlist = TraktCall("user/watchlist/shows.json/%API%/" + sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
if watchlist is None:
logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.DEBUG)
return
for show in watchlist:
if int(sickbeard.TRAKT_METHOD_ADD) != 2:
self.addDefaultShow(show["tvdb_id"], show["title"], SKIPPED)
else:
self.addDefaultShow(show["tvdb_id"], show["title"], WANTED)
if int(sickbeard.TRAKT_METHOD_ADD) == 1:
newShow = helpers.findCertainShow(sickbeard.showList, int(show["tvdb_id"]))
if newShow is not None:
self.setEpisodeToWanted(newShow, 1, 1)
self.startBacklog(newShow)
else:
self.todoWanted.append((int(show["tvdb_id"]), 1, 1))
if int(sickbeard.TRAKT_METHOD_ADD) == 3:
newShow = helpers.findCertainShow(sickbeard.showList, int(show["tvdb_id"]))
if newShow is not None:
for ep in range(1,4):
self.setEpisodeToWanted(newShow, 1, ep)
self.startBacklog(newShow)
else:
for ep in range(1,4):
self.todoWanted.append((int(show["tvdb_id"]), 1, ep))
#self.todoWanted.append((int(show["tvdb_id"]), -1, -1)) #used to pause new shows if the settings say to
def updateEpisodes(self):
"""
Sets episodes to wanted that are in trakt watchlist
"""
logger.log(u"Starting trakt episode watchlist check", logger.DEBUG)
watchlist = TraktCall("user/watchlist/episodes.json/%API%/" + sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
if watchlist is None:
logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.DEBUG)
return
for show in watchlist:
self.addDefaultShow(show["tvdb_id"], show["title"], SKIPPED)
newShow = helpers.findCertainShow(sickbeard.showList, int(show["tvdb_id"]))
for episode in show["episodes"]:
if newShow is not None:
self.setEpisodeToWanted(newShow, episode["season"], episode["number"])
else:
self.todoWanted.append((int(show["tvdb_id"]), episode["season"], episode["number"]))
self.startBacklog(newShow)
def addDefaultShow(self, tvdbid, name, status):
"""
Adds a new show with the default settings
"""
showObj = helpers.findCertainShow(sickbeard.showList, int(tvdbid))
if showObj != None:
return
logger.log(u"Adding show " + tvdbid)
root_dirs = sickbeard.ROOT_DIRS.split('|')
location = root_dirs[int(root_dirs[0]) + 1]
showPath = ek.ek(os.path.join, location, helpers.sanitizeFileName(name))
dir_exists = helpers.makeDir(showPath)
if not dir_exists:
logger.log(u"Unable to create the folder " + showPath + ", can't add the show", logger.ERROR)
return
else:
helpers.chmodAsParent(showPath)
sickbeard.showQueueScheduler.action.addShow(int(tvdbid), showPath, status, int(sickbeard.QUALITY_DEFAULT), int(sickbeard.FLATTEN_FOLDERS_DEFAULT),"fr", int(sickbeard.SUBTITLES_DEFAULT), sickbeard.AUDIO_SHOW_DEFAULT)
def setEpisodeToWanted(self, show, s, e):
"""
Sets an episode to wanted, only is it is currently skipped
"""
epObj = show.getEpisode(int(s), int(e))
if epObj == None:
return
with epObj.lock:
if epObj.status != SKIPPED:
return
logger.log(u"Setting episode s"+str(s)+"e"+str(e)+" of show " + show.name + " to wanted")
# figure out what segment the episode is in and remember it so we can backlog it
if epObj.show.air_by_date:
ep_segment = str(epObj.airdate)[:7]
else:
ep_segment = epObj.season
epObj.status = WANTED
epObj.saveToDB()
backlog = (show, ep_segment)
if self.todoBacklog.count(backlog)==0:
self.todoBacklog.append(backlog)
def manageNewShow(self, show):
episodes = [i for i in self.todoWanted if i[0] == show.tvdbid]
for episode in episodes:
self.todoWanted.remove(episode)
if episode[1] == -1 and sickbeard.TRAKT_START_PAUSED:
show.paused = 1
continue
self.setEpisodeToWanted(show, episode[1], episode[2])
self.startBacklog(show)
def startBacklog(self, show):
    """Queue a backlog search for every pending segment of the given show."""
    pending_segments = [entry for entry in self.todoBacklog if entry[0] == show]
    for entry in pending_segments:
        segment = entry[1]
        queue_item = search_queue.BacklogQueueItem(show, segment)
        sickbeard.searchQueueScheduler.action.add_item(queue_item)
        logger.log(u"Starting backlog for " + show.name + " season " + str(segment) + " because some eps were set to wanted")
        self.todoBacklog.remove(entry)
| gpl-3.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/lib2to3/main.py | 250 | 11605 | """
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
    """Return a unified diff of two strings."""
    old_lines, new_lines = a.splitlines(), b.splitlines()
    return difflib.unified_diff(
        old_lines, new_lines,
        fromfile=filename, tofile=filename,
        fromfiledate="(original)", tofiledate="(refactored)",
        lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
    """
    A refactoring tool that can avoid overwriting its input files.
    Prints output to stdout.

    Output files can optionally be written to a different directory and or
    have an extra file suffix appended to their name for use in situations
    where you do not want to replace the input files.
    """

    def __init__(self, fixers, options, explicit, nobackups, show_diffs,
                 input_base_dir='', output_dir='', append_suffix=''):
        """
        Args:
            fixers: A list of fixers to import.
            options: A dict with RefactoringTool configuration.
            explicit: A list of fixers to run even if they are explicit.
            nobackups: If true no backup '.bak' files will be created for those
                files that are being refactored.
            show_diffs: Should diffs of the refactoring be printed to stdout?
            input_base_dir: The base directory for all input files.  This class
                will strip this path prefix off of filenames before substituting
                it with output_dir.  Only meaningful if output_dir is supplied.
                All files processed by refactor() must start with this path.
            output_dir: If supplied, all converted files will be written into
                this directory tree instead of input_base_dir.
            append_suffix: If supplied, all files output by this tool will have
                this appended to their filename.  Useful for changing .py to
                .py3 for example by passing append_suffix='3'.
        """
        self.nobackups = nobackups
        self.show_diffs = show_diffs
        # Normalize the prefix with a trailing separator so the startswith()
        # check in write_file() cannot match a partial path component.
        if input_base_dir and not input_base_dir.endswith(os.sep):
            input_base_dir += os.sep
        self._input_base_dir = input_base_dir
        self._output_dir = output_dir
        self._append_suffix = append_suffix
        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)

    def log_error(self, msg, *args, **kwargs):
        # Record the error (main() turns a non-empty errors list into a
        # non-zero exit status) before delegating to the logger.
        self.errors.append((msg, args, kwargs))
        self.logger.error(msg, *args, **kwargs)

    def write_file(self, new_text, filename, old_text, encoding):
        # Compute the real destination (output dir remap + suffix), make a
        # .bak backup unless disabled, then delegate the actual write.
        orig_filename = filename
        if self._output_dir:
            if filename.startswith(self._input_base_dir):
                # Mirror the input layout below the output directory.
                filename = os.path.join(self._output_dir,
                                        filename[len(self._input_base_dir):])
            else:
                raise ValueError('filename %s does not start with the '
                                 'input_base_dir %s' % (
                                 filename, self._input_base_dir))
        if self._append_suffix:
            filename += self._append_suffix
        if orig_filename != filename:
            output_dir = os.path.dirname(filename)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            self.log_message('Writing converted %s to %s.', orig_filename,
                             filename)
        if not self.nobackups:
            # Make backup
            backup = filename + ".bak"
            if os.path.lexists(backup):
                try:
                    os.remove(backup)
                except os.error, err:
                    # Best effort: report and carry on (rename may still work).
                    self.log_message("Can't remove backup %s", backup)
            try:
                os.rename(filename, backup)
            except os.error, err:
                self.log_message("Can't rename %s to %s", filename, backup)
        # Actually write the new file
        write = super(StdoutRefactoringTool, self).write_file
        write(new_text, filename, old_text, encoding)
        if not self.nobackups:
            # Restore the original file's permission bits onto the new file.
            shutil.copymode(backup, filename)
        if orig_filename != filename:
            # Preserve the file mode in the new output directory.
            shutil.copymode(orig_filename, filename)

    def print_output(self, old, new, filename, equal):
        # Report the per-file result; optionally print the unified diff.
        if equal:
            self.log_message("No changes to %s", filename)
        else:
            self.log_message("Refactored %s", filename)
            if self.show_diffs:
                diff_lines = diff_texts(old, new, filename)
                try:
                    if self.output_lock is not None:
                        # Serialize diff output when refactoring with -j > 1.
                        with self.output_lock:
                            for line in diff_lines:
                                print line
                            sys.stdout.flush()
                    else:
                        for line in diff_lines:
                            print line
                except UnicodeEncodeError:
                    warn("couldn't encode %s's diff for your terminal" %
                         (filename,))
                    return
def warn(msg):
    """Print a warning message to standard error.

    Uses sys.stderr.write() instead of the Python 2-only
    ``print >> sys.stderr`` statement: the output is byte-identical
    (message plus trailing newline) and the module stays parseable
    under both Python 2 and Python 3.
    """
    sys.stderr.write("WARNING: %s\n" % (msg,))
def main(fixer_pkg, args=None):
    """Main program.

    Args:
        fixer_pkg: the name of a package where the fixers are located.
        args: optional; a list of command line arguments. If omitted,
              sys.argv[1:] is used.

    Returns a suggested exit status (0, 1, 2).
    """
    # Set up option parser
    parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
    parser.add_option("-d", "--doctests_only", action="store_true",
                      help="Fix up doctests only")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; default: all")
    parser.add_option("-j", "--processes", action="store", default=1,
                      type="int", help="Run 2to3 concurrently")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a transformation from being run")
    parser.add_option("-l", "--list-fixes", action="store_true",
                      help="List available transformations")
    parser.add_option("-p", "--print-function", action="store_true",
                      help="Modify the grammar so that print() is a function")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="More verbose logging")
    parser.add_option("--no-diffs", action="store_true",
                      help="Don't show diffs of the refactoring")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files")
    parser.add_option("-o", "--output-dir", action="store", type="str",
                      default="", help="Put output files in this directory "
                      "instead of overwriting the input files. Requires -n.")
    parser.add_option("-W", "--write-unchanged-files", action="store_true",
                      help="Also write files even if no changes were required"
                      " (useful with --output-dir); implies -w.")
    parser.add_option("--add-suffix", action="store", type="str", default="",
                      help="Append this string to all output filenames."
                      " Requires -n if non-empty. "
                      "ex: --add-suffix='3' will generate .py3 files.")

    # Parse command line arguments
    refactor_stdin = False
    flags = {}
    options, args = parser.parse_args(args)
    if options.write_unchanged_files:
        flags["write_unchanged_files"] = True
        if not options.write:
            warn("--write-unchanged-files/-W implies -w.")
        options.write = True
    # If we allowed these, the original files would be renamed to backup names
    # but not replaced.
    if options.output_dir and not options.nobackups:
        parser.error("Can't use --output-dir/-o without -n.")
    if options.add_suffix and not options.nobackups:
        parser.error("Can't use --add-suffix without -n.")
    if not options.write and options.no_diffs:
        warn("not writing files and not printing diffs; that's not very useful")
    if not options.write and options.nobackups:
        parser.error("Can't use -n without -w")
    if options.list_fixes:
        print "Available transformations for the -f/--fix option:"
        for fixname in refactor.get_all_fix_names(fixer_pkg):
            print fixname
        # -l with no file arguments is a successful, list-only invocation.
        if not args:
            return 0
    if not args:
        print >> sys.stderr, "At least one file or directory argument required."
        print >> sys.stderr, "Use --help to show usage."
        return 2
    if "-" in args:
        # "-" means refactor stdin to stdout; writing files makes no sense then.
        refactor_stdin = True
        if options.write:
            print >> sys.stderr, "Can't write to stdin."
            return 2
    if options.print_function:
        flags["print_function"] = True

    # Set up logging handler
    level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(name)s: %(message)s', level=level)
    logger = logging.getLogger('lib2to3.main')

    # Initialize the refactoring tool
    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
    explicit = set()
    if options.fix:
        all_present = False
        for fix in options.fix:
            if fix == "all":
                all_present = True
            else:
                explicit.add(fixer_pkg + ".fix_" + fix)
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes.union(explicit)
    # -x exclusions always win over -f requests.
    fixer_names = requested.difference(unwanted_fixes)
    input_base_dir = os.path.commonprefix(args)
    if (input_base_dir and not input_base_dir.endswith(os.sep)
        and not os.path.isdir(input_base_dir)):
        # One or more similar names were passed, their directory is the base.
        # os.path.commonprefix() is ignorant of path elements, this corrects
        # for that weird API.
        input_base_dir = os.path.dirname(input_base_dir)
    if options.output_dir:
        input_base_dir = input_base_dir.rstrip(os.sep)
        logger.info('Output in %r will mirror the input directory %r layout.',
                    options.output_dir, input_base_dir)
    rt = StdoutRefactoringTool(
            sorted(fixer_names), flags, sorted(explicit),
            options.nobackups, not options.no_diffs,
            input_base_dir=input_base_dir,
            output_dir=options.output_dir,
            append_suffix=options.add_suffix)

    # Refactor all files and directories passed as arguments
    if not rt.errors:
        if refactor_stdin:
            rt.refactor_stdin()
        else:
            try:
                rt.refactor(args, options.write, options.doctests_only,
                            options.processes)
            except refactor.MultiprocessingUnsupported:
                # -j > 1 needs the multiprocessing module; bail out cleanly.
                assert options.processes > 1
                print >> sys.stderr, "Sorry, -j isn't " \
                    "supported on this platform."
                return 1
        rt.summarize()

    # Return error status (0 if rt.errors is zero)
    return int(bool(rt.errors))
| gpl-2.0 |
snnn/tensorflow | tensorflow/python/estimator/estimator_lib.py | 20 | 2867 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator: High level tools for working with models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.python.estimator.canned.baseline import BaselineClassifier
from tensorflow.python.estimator.canned.baseline import BaselineRegressor
from tensorflow.python.estimator.canned.boosted_trees import BoostedTreesClassifier
from tensorflow.python.estimator.canned.boosted_trees import BoostedTreesRegressor
from tensorflow.python.estimator.canned.dnn import DNNClassifier
from tensorflow.python.estimator.canned.dnn import DNNRegressor
from tensorflow.python.estimator.canned.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.python.estimator.canned.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.python.estimator.canned.linear import LinearClassifier
from tensorflow.python.estimator.canned.linear import LinearRegressor
from tensorflow.python.estimator.canned.parsing_utils import classifier_parse_example_spec
from tensorflow.python.estimator.canned.parsing_utils import regressor_parse_example_spec
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.estimator import VocabInfo
from tensorflow.python.estimator.estimator import WarmStartSettings
from tensorflow.python.estimator.export import export_lib as export
from tensorflow.python.estimator.exporter import Exporter
from tensorflow.python.estimator.exporter import FinalExporter
from tensorflow.python.estimator.exporter import LatestExporter
from tensorflow.python.estimator.inputs import inputs
from tensorflow.python.estimator.keras import model_to_estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.model_fn import ModeKeys
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.training import EvalSpec
from tensorflow.python.estimator.training import train_and_evaluate
from tensorflow.python.estimator.training import TrainSpec
# pylint: enable=unused-import,line-too-long,wildcard-import
| apache-2.0 |
mx3L/enigma2 | lib/python/Screens/Satconfig.py | 12 | 36944 | from enigma import eDVBDB
from Screen import Screen
from Components.SystemInfo import SystemInfo
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.NimManager import nimmanager
from Components.Button import Button
from Components.Label import Label
from Components.SelectionList import SelectionList, SelectionEntryComponent
from Components.config import getConfigListEntry, config, ConfigNothing, ConfigSelection, updateConfigElement, ConfigSatlist, ConfigYesNo, configfile
from Components.Sources.List import List
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.ServiceStopScreen import ServiceStopScreen
from Screens.AutoDiseqc import AutoDiseqc
from Tools.BoundFunction import boundFunction
from time import mktime, localtime
from datetime import datetime
class NimSetup(Screen, ConfigListScreen, ServiceStopScreen):
def createSimpleSetup(self, list, mode):
    """Append the config entries for the 'simple' DiSEqC modes to list.

    mode is the current diseqcMode value ("single", "toneburst_a_b",
    "diseqc_a_b" or "diseqc_a_b_c_d").
    """
    cfg = self.nimConfig
    add = list.append
    if mode == "single":
        self.singleSatEntry = getConfigListEntry(_("Satellite"), cfg.diseqcA)
        add(self.singleSatEntry)
        if cfg.diseqcA.value in ("360", "560"):
            add(getConfigListEntry(_("Use circular LNB"), cfg.simpleDiSEqCSetCircularLNB))
        add(getConfigListEntry(_("Send DiSEqC"), cfg.simpleSingleSendDiSEqC))
    else:
        add(getConfigListEntry(_("Port A"), cfg.diseqcA))
    if mode in ("toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
        add(getConfigListEntry(_("Port B"), cfg.diseqcB))
        if mode == "diseqc_a_b_c_d":
            add(getConfigListEntry(_("Port C"), cfg.diseqcC))
            add(getConfigListEntry(_("Port D"), cfg.diseqcD))
    if mode != "toneburst_a_b":
        add(getConfigListEntry(_("Set voltage and 22KHz"), cfg.simpleDiSEqCSetVoltageTone))
        add(getConfigListEntry(_("Send DiSEqC only on satellite change"), cfg.simpleDiSEqCOnlyOnSatChange))
def createPositionerSetup(self, list):
    # Append the rotor (positioner / positioner_select) entries for the
    # simple setup mode to the given config list.
    nim = self.nimConfig
    if nim.diseqcMode.value == "positioner_select":
        self.selectSatsEntry = getConfigListEntry(_("Press OK to select satellites"), self.nimConfig.pressOKtoList)
        list.append(self.selectSatsEntry)
    # Site coordinates used for USALS rotor positioning.
    list.append(getConfigListEntry(_("Longitude"), nim.longitude))
    list.append(getConfigListEntry(" ", nim.longitudeOrientation))
    list.append(getConfigListEntry(_("Latitude"), nim.latitude))
    list.append(getConfigListEntry(" ", nim.latitudeOrientation))
    if SystemInfo["CanMeasureFrontendInputPower"]:
        self.advancedPowerMeasurement = getConfigListEntry(_("Use power measurement"), nim.powerMeasurement)
        list.append(self.advancedPowerMeasurement)
        if nim.powerMeasurement.value:
            list.append(getConfigListEntry(_("Power threshold in mA"), nim.powerThreshold))
            self.turningSpeed = getConfigListEntry(_("Rotor turning speed"), nim.turningSpeed)
            list.append(self.turningSpeed)
            if nim.turningSpeed.value == "fast epoch":
                self.turnFastEpochBegin = getConfigListEntry(_("Begin time"), nim.fastTurningBegin)
                self.turnFastEpochEnd = getConfigListEntry(_("End time"), nim.fastTurningEnd)
                list.append(self.turnFastEpochBegin)
                list.append(self.turnFastEpochEnd)
    else:
        # Frontend cannot measure input power: force the option off so it is
        # never left enabled in the stored configuration.
        if nim.powerMeasurement.value:
            nim.powerMeasurement.value = False
            nim.powerMeasurement.save()
    if not hasattr(self, 'additionalMotorOptions'):
        # Session-only expander toggle; deliberately not persisted.
        self.additionalMotorOptions = ConfigYesNo(False)
    self.showAdditionalMotorOptions = getConfigListEntry(_("Extra motor options"), self.additionalMotorOptions)
    self.list.append(self.showAdditionalMotorOptions)
    if self.additionalMotorOptions.value:
        # chr(176) is the degree sign.
        self.list.append(getConfigListEntry(" " + _("Horizontal turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedH))
        self.list.append(getConfigListEntry(" " + _("Vertical turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedV))
        self.list.append(getConfigListEntry(" " + _("Turning step size") + " [" + chr(176) + "]", nim.tuningstepsize))
        self.list.append(getConfigListEntry(" " + _("Max memory positions"), nim.rotorPositions))
def createConfigMode(self):
    """Build the configMode choice list for DVB-S tuners.

    The optional modes (equal / second-cable / loopthrough) are offered
    only when another tuner can actually serve that role.
    """
    if not self.nim.isCompatible("DVB-S"):
        return
    choices = {
        "nothing": _("Not configured"),
        "simple": _("Simple"),
        "advanced": _("Advanced"),
    }
    if nimmanager.canEqualTo(self.slotid):
        choices["equal"] = _("Equal to")
    if nimmanager.canDependOn(self.slotid):
        choices["satposdepends"] = _("Second cable of motorized LNB")
    if nimmanager.canConnectTo(self.slotid):
        choices["loopthrough"] = _("Loop through to")
    self.nimConfig.configMode.setChoices(choices, default = "simple")
def createSetup(self):
    # Rebuild the whole ConfigList for this tuner slot from the current
    # configuration state.  Called on entry and again from newConfig()
    # whenever an entry that influences the layout changes.
    print "Creating setup"
    self.list = [ ]

    # References to entries whose change triggers a rebuild; reset so
    # newConfig() only reacts to entries actually present in this layout.
    self.multiType = None
    self.configMode = None
    self.diseqcModeEntry = None
    self.advancedSatsEntry = None
    self.advancedLnbsEntry = None
    self.advancedDiseqcMode = None
    self.advancedUsalsEntry = None
    self.advancedLof = None
    self.advancedPowerMeasurement = None
    self.turningSpeed = None
    self.turnFastEpochBegin = None
    self.turnFastEpochEnd = None
    self.toneburst = None
    self.committedDiseqcCommand = None
    self.uncommittedDiseqcCommand = None
    self.commandOrder = None
    self.cableScanType = None
    self.have_advanced = False
    self.advancedUnicable = None
    self.advancedType = None
    self.advancedManufacturer = None
    self.advancedSCR = None
    self.advancedConnected = None
    self.showAdditionalMotorOptions = None
    self.selectSatsEntry = None
    self.advancedSelectSatsEntry = None
    self.singleSatEntry = None

    if self.nim.isMultiType():
        multiType = self.nimConfig.multiType
        self.multiType = getConfigListEntry(_("Tuner type"), multiType)
        self.list.append(self.multiType)

    if self.nim.isCompatible("DVB-S"):
        self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
        self.list.append(self.configMode)
        if self.nimConfig.configMode.value == "simple": #simple setup
            self.diseqcModeEntry = getConfigListEntry(pgettext("Satellite configuration mode", "Mode"), self.nimConfig.diseqcMode)
            self.list.append(self.diseqcModeEntry)
            if self.nimConfig.diseqcMode.value in ("single", "toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
                self.createSimpleSetup(self.list, self.nimConfig.diseqcMode.value)
            if self.nimConfig.diseqcMode.value in ("positioner", "positioner_select"):
                self.createPositionerSetup(self.list)
        elif self.nimConfig.configMode.value == "equal":
            # Offer every tuner this one may be configured "equal to".
            choices = []
            nimlist = nimmanager.canEqualTo(self.nim.slot)
            for id in nimlist:
                choices.append((str(id), nimmanager.getNimDescription(id)))
            self.nimConfig.connectedTo.setChoices(choices)
            self.list.append(getConfigListEntry(_("Tuner"), self.nimConfig.connectedTo))
        elif self.nimConfig.configMode.value == "satposdepends":
            choices = []
            nimlist = nimmanager.canDependOn(self.nim.slot)
            for id in nimlist:
                choices.append((str(id), nimmanager.getNimDescription(id)))
            self.nimConfig.connectedTo.setChoices(choices)
            self.list.append(getConfigListEntry(_("Tuner"), self.nimConfig.connectedTo))
        elif self.nimConfig.configMode.value == "loopthrough":
            choices = []
            print "connectable to:", nimmanager.canConnectTo(self.slotid)
            connectable = nimmanager.canConnectTo(self.slotid)
            for id in connectable:
                choices.append((str(id), nimmanager.getNimDescription(id)))
            self.nimConfig.connectedTo.setChoices(choices)
            self.list.append(getConfigListEntry(_("Connected to"), self.nimConfig.connectedTo))
        elif self.nimConfig.configMode.value == "nothing":
            pass
        elif self.nimConfig.configMode.value == "advanced": # advanced
            # SATs
            self.advancedSatsEntry = getConfigListEntry(_("Satellite"), self.nimConfig.advanced.sats)
            self.list.append(self.advancedSatsEntry)
            current_config_sats = self.nimConfig.advanced.sats.value
            if current_config_sats in ("3605", "3606"):
                # Special "user defined" satellite-list entries.
                self.advancedSelectSatsEntry = getConfigListEntry(_("Press OK to select satellites"), self.nimConfig.pressOKtoList)
                self.list.append(self.advancedSelectSatsEntry)
                self.fillListWithAdvancedSatEntrys(self.nimConfig.advanced.sat[int(current_config_sats)])
            else:
                cur_orb_pos = self.nimConfig.advanced.sats.orbital_position
                satlist = self.nimConfig.advanced.sat.keys()
                if cur_orb_pos is not None:
                    if cur_orb_pos not in satlist:
                        cur_orb_pos = satlist[0]
                    self.fillListWithAdvancedSatEntrys(self.nimConfig.advanced.sat[cur_orb_pos])
            self.have_advanced = True
        if self.nim.description == "Alps BSBE2" and config.usage.setup_level.index >= 2: # expert
            self.list.append(getConfigListEntry(_("Tone amplitude"), self.nimConfig.toneAmplitude))
    elif self.nim.isCompatible("DVB-C"):
        self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
        self.list.append(self.configMode)
        if self.nimConfig.configMode.value == "enabled":
            self.list.append(getConfigListEntry(_("Network ID"), self.nimConfig.cable.scan_networkid))
            self.cableScanType=getConfigListEntry(_("Used service scan type"), self.nimConfig.cable.scan_type)
            self.list.append(self.cableScanType)
            if self.nimConfig.cable.scan_type.value == "provider":
                self.list.append(getConfigListEntry(_("Provider to scan"), self.nimConfig.cable.scan_provider))
            else:
                if self.nimConfig.cable.scan_type.value == "bands":
                    # TRANSLATORS: option name, indicating which type of (DVB-C) band should be scanned. The name of the band is printed in '%s'. E.g.: 'Scan EU MID band'
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("EU VHF I"), self.nimConfig.cable.scan_band_EU_VHF_I))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("EU MID"), self.nimConfig.cable.scan_band_EU_MID))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("EU VHF III"), self.nimConfig.cable.scan_band_EU_VHF_III))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("EU UHF IV"), self.nimConfig.cable.scan_band_EU_UHF_IV))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("EU UHF V"), self.nimConfig.cable.scan_band_EU_UHF_V))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("EU SUPER"), self.nimConfig.cable.scan_band_EU_SUPER))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("EU HYPER"), self.nimConfig.cable.scan_band_EU_HYPER))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("US LOW"), self.nimConfig.cable.scan_band_US_LOW))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("US MID"), self.nimConfig.cable.scan_band_US_MID))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("US HIGH"), self.nimConfig.cable.scan_band_US_HIGH))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("US SUPER"), self.nimConfig.cable.scan_band_US_SUPER))
                    self.list.append(getConfigListEntry(_("Scan %s band") % ("US HYPER"), self.nimConfig.cable.scan_band_US_HYPER))
                elif self.nimConfig.cable.scan_type.value == "steps":
                    self.list.append(getConfigListEntry(_("Frequency scan step size(khz)"), self.nimConfig.cable.scan_frequency_steps))
                # TRANSLATORS: option name, indicating which type of (DVB-C) modulation should be scanned. The modulation type is printed in '%s'. E.g.: 'Scan QAM16'
                self.list.append(getConfigListEntry(_("Scan %s") % ("QAM16"), self.nimConfig.cable.scan_mod_qam16))
                self.list.append(getConfigListEntry(_("Scan %s") % ("QAM32"), self.nimConfig.cable.scan_mod_qam32))
                self.list.append(getConfigListEntry(_("Scan %s") % ("QAM64"), self.nimConfig.cable.scan_mod_qam64))
                self.list.append(getConfigListEntry(_("Scan %s") % ("QAM128"), self.nimConfig.cable.scan_mod_qam128))
                self.list.append(getConfigListEntry(_("Scan %s") % ("QAM256"), self.nimConfig.cable.scan_mod_qam256))
                self.list.append(getConfigListEntry(_("Scan %s") % ("SR6900"), self.nimConfig.cable.scan_sr_6900))
                self.list.append(getConfigListEntry(_("Scan %s") % ("SR6875"), self.nimConfig.cable.scan_sr_6875))
                self.list.append(getConfigListEntry(_("Scan additional SR"), self.nimConfig.cable.scan_sr_ext1))
                self.list.append(getConfigListEntry(_("Scan additional SR"), self.nimConfig.cable.scan_sr_ext2))
        self.have_advanced = False
    elif self.nim.isCompatible("DVB-T"):
        self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
        self.list.append(self.configMode)
        self.have_advanced = False
        if self.nimConfig.configMode.value == "enabled":
            self.list.append(getConfigListEntry(_("Terrestrial provider"), self.nimConfig.terrestrial))
            self.list.append(getConfigListEntry(_("Enable 5V for active antenna"), self.nimConfig.terrestrial_5V))
    else:
        self.have_advanced = False
    # Push the rebuilt list into the ConfigList widget.
    self["config"].list = self.list
    self["config"].l.setList(self.list)
def newConfig(self):
    """Handle a value change of the currently selected config entry.

    Rebuilds the list whenever the changed entry is one that influences
    which entries are shown; changing the tuner type additionally
    re-initializes the NIM manager first.
    """
    self.setTextKeyBlue()
    layout_triggers = (self.configMode, self.diseqcModeEntry, self.advancedSatsEntry, \
        self.advancedLnbsEntry, self.advancedDiseqcMode, self.advancedUsalsEntry, \
        self.advancedLof, self.advancedPowerMeasurement, self.turningSpeed, \
        self.advancedType, self.advancedSCR, self.advancedManufacturer, self.advancedUnicable, self.advancedConnected, \
        self.toneburst, self.committedDiseqcCommand, self.uncommittedDiseqcCommand, self.singleSatEntry, \
        self.commandOrder, self.showAdditionalMotorOptions, self.cableScanType, self.multiType)
    current = self["config"].getCurrent()
    if current == self.multiType:
        # Tuner type changed: re-init so slot capabilities match the new type.
        from Components.NimManager import InitNimManager
        InitNimManager(nimmanager)
        self.nim = nimmanager.nim_slots[self.slotid]
        self.nimConfig = self.nim.config
    if current in layout_triggers:
        self.createSetup()
def run(self):
    # Commit the dialog.  Returns True when the settings were saved and
    # False when the automatic DiSEqC wizard was launched instead (a
    # "3600" orbital position means "find automatically").
    if self.nimConfig.configMode.value == "simple":
        autodiseqc_ports = 0
        if self.nimConfig.diseqcMode.value == "single":
            if self.nimConfig.diseqcA.orbital_position == 3600:
                autodiseqc_ports = 1
        elif self.nimConfig.diseqcMode.value == "diseqc_a_b":
            if self.nimConfig.diseqcA.orbital_position == 3600 or self.nimConfig.diseqcB.orbital_position == 3600:
                autodiseqc_ports = 2
        elif self.nimConfig.diseqcMode.value == "diseqc_a_b_c_d":
            if self.nimConfig.diseqcA.orbital_position == 3600 or self.nimConfig.diseqcB.orbital_position == 3600 or self.nimConfig.diseqcC.orbital_position == 3600 or self.nimConfig.diseqcD.orbital_position == 3600:
                autodiseqc_ports = 4
        if autodiseqc_ports:
            self.autoDiseqcRun(autodiseqc_ports)
            return False
    if self.have_advanced and self.nim.config_mode == "advanced":
        self.fillAdvancedList()
    for x in self.list:
        if x in (self.turnFastEpochBegin, self.turnFastEpochEnd):
            # workaround for storing only hour*3600+min*60 value in configfile
            # not really needed.. just for cosmetics..
            tm = localtime(x[1].value)
            dt = datetime(1970, 1, 1, tm.tm_hour, tm.tm_min)
            x[1].value = int(mktime(dt.timetuple()))
        x[1].save()
    # Re-program the sec (satellite equipment control) layer with the
    # saved configuration, then persist everything.
    nimmanager.sec.update()
    self.saveAll()
    return True
def autoDiseqcRun(self, ports):
    """Open the automatic DiSEqC detection screen for this slot."""
    voltage_tone = self.nimConfig.simpleDiSEqCSetVoltageTone
    only_on_sat_change = self.nimConfig.simpleDiSEqCOnlyOnSatChange
    self.session.openWithCallback(
        self.autoDiseqcCallback, AutoDiseqc,
        self.slotid, ports, voltage_tone, only_on_sat_change)
def autoDiseqcCallback(self, result):
    """Return point of the AutoDiseqc screen.

    Inside the startup wizard we step back one page; otherwise the
    config list is rebuilt to reflect the detected settings.
    """
    from Screens.Wizard import Wizard
    wizard = Wizard.instance
    if wizard is None:
        self.createSetup()
    else:
        wizard.back()
def fillListWithAdvancedSatEntrys(self, Sat):
    # Append all advanced-mode entries for one satellite (its LNB, LOF,
    # unicable, DiSEqC and rotor settings) to self.list.
    lnbnum = int(Sat.lnb.value)
    currLnb = self.nimConfig.advanced.lnb[lnbnum]
    if isinstance(currLnb, ConfigNothing):
        # "not configured" placeholder - only the LNB selector is shown.
        currLnb = None
    # LNBs
    self.advancedLnbsEntry = getConfigListEntry(_("LNB"), Sat.lnb)
    self.list.append(self.advancedLnbsEntry)
    if currLnb:
        self.list.append(getConfigListEntry(_("Priority"), currLnb.prio))
        self.advancedLof = getConfigListEntry("LOF", currLnb.lof)
        self.list.append(self.advancedLof)
        if currLnb.lof.value == "user_defined":
            self.list.append(getConfigListEntry("LOF/L", currLnb.lofl))
            self.list.append(getConfigListEntry("LOF/H", currLnb.lofh))
            self.list.append(getConfigListEntry(_("Threshold"), currLnb.threshold))
        if currLnb.lof.value == "unicable":
            self.advancedUnicable = getConfigListEntry("Unicable "+_("Configuration mode"), currLnb.unicable)
            self.list.append(self.advancedUnicable)
            if currLnb.unicable.value == "unicable_user":
                # Fully manual unicable: user picks SCR channel and frequency.
                self.advancedSCR = getConfigListEntry(_("Channel"), currLnb.satcruser)
                self.list.append(self.advancedSCR)
                self.list.append(getConfigListEntry(_("Frequency"), currLnb.satcrvcouser[currLnb.satcruser.index]))
                self.list.append(getConfigListEntry("LOF/L", currLnb.lofl))
                self.list.append(getConfigListEntry("LOF/H", currLnb.lofh))
                self.list.append(getConfigListEntry(_("Threshold"), currLnb.threshold))
            elif currLnb.unicable.value == "unicable_matrix":
                # Known unicable matrix: channel/frequency come from the
                # manufacturer/product tables.
                manufacturer_name = currLnb.unicableMatrixManufacturer.value
                manufacturer = currLnb.unicableMatrix[manufacturer_name]
                product_name = manufacturer.product.value
                self.advancedManufacturer = getConfigListEntry(_("Manufacturer"), currLnb.unicableMatrixManufacturer)
                self.advancedType = getConfigListEntry(_("Type"), manufacturer.product)
                self.advancedSCR = getConfigListEntry(_("Channel"), manufacturer.scr[product_name])
                self.list.append(self.advancedManufacturer)
                self.list.append(self.advancedType)
                self.list.append(self.advancedSCR)
                self.list.append(getConfigListEntry(_("Frequency"), manufacturer.vco[product_name][manufacturer.scr[product_name].index]))
            elif currLnb.unicable.value == "unicable_lnb":
                manufacturer_name = currLnb.unicableLnbManufacturer.value
                manufacturer = currLnb.unicableLnb[manufacturer_name]
                product_name = manufacturer.product.value
                self.advancedManufacturer = getConfigListEntry(_("Manufacturer"), currLnb.unicableLnbManufacturer)
                self.advancedType = getConfigListEntry(_("Type"), manufacturer.product)
                self.advancedSCR = getConfigListEntry(_("Channel"), manufacturer.scr[product_name])
                self.list.append(self.advancedManufacturer)
                self.list.append(self.advancedType)
                self.list.append(self.advancedSCR)
                self.list.append(getConfigListEntry(_("Frequency"), manufacturer.vco[product_name][manufacturer.scr[product_name].index]))
            choices = []
            connectable = nimmanager.canConnectTo(self.slotid)
            for id in connectable:
                choices.append((str(id), nimmanager.getNimDescription(id)))
            if len(choices):
                self.advancedConnected = getConfigListEntry(_("connected"), self.nimConfig.advanced.unicableconnected)
                self.list.append(self.advancedConnected)
                if self.nimConfig.advanced.unicableconnected.value == True:
                    self.nimConfig.advanced.unicableconnectedTo.setChoices(choices)
                    self.list.append(getConfigListEntry(_("Connected to"),self.nimConfig.advanced.unicableconnectedTo))
        else: # no unicable
            self.list.append(getConfigListEntry(_("Voltage mode"), Sat.voltage))
            self.list.append(getConfigListEntry(_("Increased voltage"), currLnb.increased_voltage))
            self.list.append(getConfigListEntry(_("Tone mode"), Sat.tonemode))
        if lnbnum < 65:
            self.advancedDiseqcMode = getConfigListEntry(_("DiSEqC mode"), currLnb.diseqcMode)
            self.list.append(self.advancedDiseqcMode)
        if currLnb.diseqcMode.value != "none":
            self.list.append(getConfigListEntry(_("Fast DiSEqC"), currLnb.fastDiseqc))
            self.toneburst = getConfigListEntry(_("Toneburst"), currLnb.toneburst)
            self.list.append(self.toneburst)
            self.committedDiseqcCommand = getConfigListEntry(_("DiSEqC 1.0 command"), currLnb.commitedDiseqcCommand)
            self.list.append(self.committedDiseqcCommand)
            if currLnb.diseqcMode.value == "1_0":
                # Command order matters only when both toneburst and a
                # committed command are in use.
                if currLnb.toneburst.index and currLnb.commitedDiseqcCommand.index:
                    self.list.append(getConfigListEntry(_("Command order"), currLnb.commandOrder1_0))
            else:
                self.uncommittedDiseqcCommand = getConfigListEntry(_("DiSEqC 1.1 command"), currLnb.uncommittedDiseqcCommand)
                self.list.append(self.uncommittedDiseqcCommand)
                # Keep the commandOrder value consistent with whether an
                # uncommitted (u) command takes part in the sequence.
                if currLnb.uncommittedDiseqcCommand.index:
                    if currLnb.commandOrder.value == "ct":
                        currLnb.commandOrder.value = "cut"
                    elif currLnb.commandOrder.value == "tc":
                        currLnb.commandOrder.value = "tcu"
                else:
                    if currLnb.commandOrder.index & 1:
                        currLnb.commandOrder.value = "tc"
                    else:
                        currLnb.commandOrder.value = "ct"
                self.commandOrder = getConfigListEntry(_("Command order"), currLnb.commandOrder)
                # Only offer an order entry when at least two of
                # toneburst/committed/uncommitted are active.
                if 1 < ((1 if currLnb.uncommittedDiseqcCommand.index else 0) + (1 if currLnb.commitedDiseqcCommand.index else 0) + (1 if currLnb.toneburst.index else 0)):
                    self.list.append(self.commandOrder)
                if currLnb.uncommittedDiseqcCommand.index:
                    self.list.append(getConfigListEntry(_("DiSEqC 1.1 repeats"), currLnb.diseqcRepeats))
                self.list.append(getConfigListEntry(_("Sequence repeat"), currLnb.sequenceRepeat))
            if currLnb.diseqcMode.value == "1_2":
                if SystemInfo["CanMeasureFrontendInputPower"]:
                    self.advancedPowerMeasurement = getConfigListEntry(_("Use power measurement"), currLnb.powerMeasurement)
                    self.list.append(self.advancedPowerMeasurement)
                    if currLnb.powerMeasurement.value:
                        self.list.append(getConfigListEntry(_("Power threshold in mA"), currLnb.powerThreshold))
                        self.turningSpeed = getConfigListEntry(_("Rotor turning speed"), currLnb.turningSpeed)
                        self.list.append(self.turningSpeed)
                        if currLnb.turningSpeed.value == "fast epoch":
                            self.turnFastEpochBegin = getConfigListEntry(_("Begin time"), currLnb.fastTurningBegin)
                            self.turnFastEpochEnd = getConfigListEntry(_("End time"), currLnb.fastTurningEnd)
                            self.list.append(self.turnFastEpochBegin)
                            self.list.append(self.turnFastEpochEnd)
                else:
                    # Hardware cannot measure input power: force the option off.
                    if currLnb.powerMeasurement.value:
                        currLnb.powerMeasurement.value = False
                        currLnb.powerMeasurement.save()
                self.advancedUsalsEntry = getConfigListEntry(_("Use USALS for this sat"), Sat.usals)
                if lnbnum < 65:
                    self.list.append(self.advancedUsalsEntry)
                if Sat.usals.value:
                    # USALS: position computed from site coordinates.
                    self.list.append(getConfigListEntry(_("Longitude"), currLnb.longitude))
                    self.list.append(getConfigListEntry(" ", currLnb.longitudeOrientation))
                    self.list.append(getConfigListEntry(_("Latitude"), currLnb.latitude))
                    self.list.append(getConfigListEntry(" ", currLnb.latitudeOrientation))
                else:
                    self.list.append(getConfigListEntry(_("Stored position"), Sat.rotorposition))
                if not hasattr(self, 'additionalMotorOptions'):
                    # Session-only expander toggle; deliberately not persisted.
                    self.additionalMotorOptions = ConfigYesNo(False)
                self.showAdditionalMotorOptions = getConfigListEntry(_("Extra motor options"), self.additionalMotorOptions)
                self.list.append(self.showAdditionalMotorOptions)
                if self.additionalMotorOptions.value:
                    # chr(176) is the degree sign.
                    self.list.append(getConfigListEntry(" " + _("Horizontal turning speed") + " [" + chr(176) + "/sec]", currLnb.turningspeedH))
                    self.list.append(getConfigListEntry(" " + _("Vertical turning speed") + " [" + chr(176) + "/sec]", currLnb.turningspeedV))
                    self.list.append(getConfigListEntry(" " + _("Turning step size") + " [" + chr(176) + "]", currLnb.tuningstepsize))
                    self.list.append(getConfigListEntry(" " + _("Max memory positions"), currLnb.rotorPositions))
def fillAdvancedList(self):
    """Rebuild the config list for 'advanced' configuration mode and
    push it to the on-screen ConfigList widget."""
    self.list = [ ]
    self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
    self.list.append(self.configMode)
    self.advancedSatsEntry = getConfigListEntry(_("Satellite"), self.nimConfig.advanced.sats)
    self.list.append(self.advancedSatsEntry)
    # One group of LNB/sat entries per configured satellite.
    for x in self.nimConfig.advanced.sat.keys():
        Sat = self.nimConfig.advanced.sat[x]
        self.fillListWithAdvancedSatEntrys(Sat)
    self["config"].list = self.list
def keyOk(self):
    """OK pressed: the two satellite-list entries open the
    SelectSatsEntryScreen; any other entry falls through to saving."""
    if self["config"].getCurrent() == self.advancedSelectSatsEntry:
        # Advanced mode: the list belongs to the currently selected sat.
        conf = self.nimConfig.advanced.sat[int(self.nimConfig.advanced.sats.value)].userSatellitesList
        self.session.openWithCallback(boundFunction(self.updateConfUserSatellitesList, conf), SelectSatsEntryScreen, userSatlist=conf.value)
    elif self["config"].getCurrent() == self.selectSatsEntry:
        # Simple mode: a single tuner-wide satellite list.
        conf = self.nimConfig.userSatellitesList
        self.session.openWithCallback(boundFunction(self.updateConfUserSatellitesList, conf), SelectSatsEntryScreen, userSatlist=conf.value)
    else:
        self.keySave()
def updateConfUserSatellitesList(self, conf, val=None):
    """Callback from SelectSatsEntryScreen: persist the chosen list.

    conf is the config element to update; val is the new value.  A val
    of None means the selection screen was cancelled, in which case
    nothing is stored.
    """
    if val is None:
        return
    conf.value = val
    conf.save()
def keySave(self):
    """Validate and store the tuner setup, then walk the user through
    removing services on satellites that are no longer configured."""
    old_configured_sats = nimmanager.getConfiguredSats()
    if not self.run():
        # Validation failed; stay on the screen.
        return
    new_configured_sats = nimmanager.getConfiguredSats()
    # Satellites that were reachable before this change but not after.
    self.unconfed_sats = old_configured_sats - new_configured_sats
    self.satpos_to_remove = None
    # Start the (possibly interactive) per-satellite cleanup loop.
    self.deleteConfirmed((None, "no"))
def deleteConfirmed(self, confirmed):
    """ChoiceBox callback driving removal of services on unconfigured sats.

    confirmed is a (label, answer) pair where answer is one of
    "yes"/"no"/"yestoall"/"notoall"; None is treated like "no".  The
    method re-enters itself (via the ChoiceBox callback or directly for
    the *toall answers) once per satellite in self.unconfed_sats.
    """
    if confirmed is None:
        confirmed = (None, "no")
    if confirmed[1] == "yes" or confirmed[1] == "yestoall":
        # Remove all services stored for the previously asked position.
        eDVBDB.getInstance().removeServices(-1, -1, -1, self.satpos_to_remove)
    if self.satpos_to_remove is not None:
        self.unconfed_sats.remove(self.satpos_to_remove)
    self.satpos_to_remove = None
    for orbpos in self.unconfed_sats:
        self.satpos_to_remove = orbpos
        orbpos = self.satpos_to_remove
        try:
            # why we need this cast?
            sat_name = str(nimmanager.getSatDescription(orbpos))
        except:
            # No description available: synthesize "<deg> E/W" from the
            # orbital position (tenths of a degree, > 1800 means west).
            # NOTE(review): "%d" % (orbpos / 10) assumes integer
            # division -- verify if this file is ever run under Python 3.
            if orbpos > 1800: # west
                orbpos = 3600 - orbpos
                h = _("W")
            else:
                h = _("E")
            sat_name = ("%d.%d" + h) % (orbpos / 10, orbpos % 10)
        if confirmed[1] == "yes" or confirmed[1] == "no":
            # TRANSLATORS: The satellite with name '%s' is no longer used after a configuration change. The user is asked whether or not the satellite should be deleted.
            self.session.openWithCallback(self.deleteConfirmed, ChoiceBox, _("%s is no longer used. Should it be deleted?") % sat_name, [(_("Yes"), "yes"), (_("No"), "no"), (_("Yes to all"), "yestoall"), (_("No to all"), "notoall")], None, 1)
        if confirmed[1] == "yestoall" or confirmed[1] == "notoall":
            self.deleteConfirmed(confirmed)
        break
    else:
        # All satellites handled -- leave the screen.
        self.restoreService(_("Zap back to service before tuner setup?"))
def __init__(self, session, slotid):
    """Build the tuner-configuration screen for NIM slot `slotid`."""
    Screen.__init__(self, session)
    self.list = [ ]
    ServiceStopScreen.__init__(self)
    # Stop live TV so the tuner can be reconfigured safely.
    self.stopService()
    ConfigListScreen.__init__(self, self.list)
    self["key_red"] = Label(_("Cancel"))
    self["key_green"] = Label(_("Save"))
    self["key_yellow"] = Label(_("Configuration mode"))
    self["key_blue"] = Label()
    self["actions"] = ActionMap(["SetupActions", "SatlistShortcutAction"],
    {
        "ok": self.keyOk,
        "save": self.keySave,
        "cancel": self.keyCancel,
        "changetype": self.changeConfigurationMode,
        "nothingconnected": self.nothingConnectedShortcut
    }, -2)
    self.slotid = slotid
    self.nim = nimmanager.nim_slots[slotid]
    self.nimConfig = self.nim.config
    self.createConfigMode()
    self.createSetup()
    self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
    # Deferred to onLayoutFinish so the title lands on the final window.
    self.setTitle(_("Reception Settings"))
def keyLeft(self):
    """Handle LEFT: step the focused config element, then refresh.

    The two satellite-list entries open the selection screen instead of
    cycling values.
    """
    ConfigListScreen.keyLeft(self)
    current = self["config"].getCurrent()
    if current in (self.advancedSelectSatsEntry, self.selectSatsEntry):
        self.keyOk()
    else:
        self.newConfig()
def setTextKeyBlue(self):
    """Show 'Set default' on the blue button only while there are
    unsaved changes; otherwise blank it."""
    label = _("Set default") if self["config"].isChanged() else ""
    self["key_blue"].setText(label)
def keyRight(self):
    """Handle RIGHT: step the focused config element, then refresh.

    The two satellite-list entries open the selection screen instead of
    cycling values.
    """
    ConfigListScreen.keyRight(self)
    current = self["config"].getCurrent()
    if current in (self.advancedSelectSatsEntry, self.selectSatsEntry):
        self.keyOk()
    else:
        self.newConfig()
def handleKeyFileCallback(self, answer):
    """File-selection callback: apply the choice, then rebuild the list."""
    ConfigListScreen.handleKeyFileCallback(self, answer)
    self.newConfig()
def keyCancel(self):
    """EXIT pressed: confirm before discarding unsaved changes,
    otherwise leave the screen directly."""
    if self["config"].isChanged():
        self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
    else:
        self.restoreService(_("Zap back to service before tuner setup?"))
def saveAll(self):
    """Persist all config entries, fixing up DVB-S specifics first."""
    if self.nim.isCompatible("DVB-S"):
        # reset connectedTo to all choices to properly store the default value
        choices = []
        nimlist = nimmanager.getNimListOfType("DVB-S", self.slotid)
        for id in nimlist:
            choices.append((str(id), nimmanager.getNimDescription(id)))
        self.nimConfig.connectedTo.setChoices(choices)
        # sanity check for empty sat list
        if self.nimConfig.configMode.value != "satposdepends" and len(nimmanager.getSatListForNim(self.slotid)) < 1:
            self.nimConfig.configMode.value = "nothing"
    if self["config"].isChanged():
        for x in self["config"].list:
            x[1].save()
        # Write the whole enigma2 settings file to disk.
        configfile.save()
def cancelConfirm(self, result):
    """MessageBox callback: on confirmation, revert all pending edits
    and leave the screen.

    result is falsy when the user chose not to abandon their changes.
    """
    if not result:
        return
    for entry in self["config"].list:
        entry[1].cancel()
    # saveAll() is still required here: it re-seeds the connectedTo
    # choices that were narrowed while editing.
    self.saveAll()
    self.restoreService(_("Zap back to service before tuner setup?"))
def changeConfigurationMode(self):
    """Yellow button: cycle the configuration mode and rebuild the list."""
    if self.configMode:
        self.nimConfig.configMode.selectNext()
        # Redraw the mode entry before regenerating the rest of the list.
        self["config"].invalidate(self.configMode)
        self.setTextKeyBlue()
        self.createSetup()
def nothingConnectedShortcut(self):
    """Shortcut action: throw away all pending edits for this tuner."""
    if self["config"].isChanged():
        for x in self["config"].list:
            x[1].cancel()
        self.setTextKeyBlue()
        self.createSetup()
class NimSelection(Screen):
    """List of all tuner (NIM) slots with a per-slot configuration
    summary; OK opens the setup screen for the selected slot."""
    def __init__(self, session):
        Screen.__init__(self, session)
        self.list = [None] * nimmanager.getSlotCount()
        self["nimlist"] = List(self.list)
        self.updateList()
        # Subclasses override setResultClass() to open a different setup screen.
        self.setResultClass()
        self["actions"] = ActionMap(["OkCancelActions", "MenuActions"],
        {
            "ok": self.okbuttonClick,
            "cancel": self.close,
            "menu": self.exit,
        }, -2)
        self.setTitle(_("Choose Tuner"))
    def exit(self):
        # MENU closes with a truthy result (caller-defined meaning).
        self.close(True)
    def setResultClass(self):
        # Screen class opened on OK; hook for subclasses.
        self.resultclass = NimSetup
    def okbuttonClick(self):
        nim = self["nimlist"].getCurrent()
        # Entry layout is (slotid, description, summary, slot object).
        nim = nim and nim[3]
        # Only present, supported slots can be configured.
        if nim is not None and not nim.empty and nim.isSupported():
            self.session.openWithCallback(boundFunction(self.updateList, self["nimlist"].getIndex()), self.resultclass, nim.slot)
    def showNim(self, nim):
        # Hook for subclasses to filter which slots are listed.
        return True
    def updateList(self, index=None):
        """Rebuild the slot list; each entry is
        (slotid, friendly description, config summary text, slot)."""
        self.list = [ ]
        for x in nimmanager.nim_slots:
            slotid = x.slot
            nimConfig = nimmanager.getNimConfig(x.slot)
            text = nimConfig.configMode.value
            if self.showNim(x):
                if x.isCompatible("DVB-S"):
                    if nimConfig.configMode.value in ("loopthrough", "equal", "satposdepends"):
                        text = { "loopthrough": _("Loop through to"),
                            "equal": _("Equal to"),
                            "satposdepends": _("Second cable of motorized LNB") } [nimConfig.configMode.value]
                        text += " " + _("Tuner") + " " + ["A", "B", "C", "D"][int(nimConfig.connectedTo.value)]
                    elif nimConfig.configMode.value == "nothing":
                        text = _("not configured")
                    elif nimConfig.configMode.value == "simple":
                        if nimConfig.diseqcMode.value in ("single", "toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
                            text = {"single": _("Single"), "toneburst_a_b": _("Toneburst A/B"), "diseqc_a_b": _("DiSEqC A/B"), "diseqc_a_b_c_d": _("DiSEqC A/B/C/D")}[nimConfig.diseqcMode.value] + "\n"
                            text += _("Sats") + ": "
                            satnames = []
                            # orbital_position of 3600 marks an unused input.
                            if nimConfig.diseqcA.orbital_position < 3600:
                                satnames.append(nimmanager.getSatName(int(nimConfig.diseqcA.value)))
                            if nimConfig.diseqcMode.value in ("toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
                                if nimConfig.diseqcB.orbital_position < 3600:
                                    satnames.append(nimmanager.getSatName(int(nimConfig.diseqcB.value)))
                            if nimConfig.diseqcMode.value == "diseqc_a_b_c_d":
                                if nimConfig.diseqcC.orbital_position < 3600:
                                    satnames.append(nimmanager.getSatName(int(nimConfig.diseqcC.value)))
                                if nimConfig.diseqcD.orbital_position < 3600:
                                    satnames.append(nimmanager.getSatName(int(nimConfig.diseqcD.value)))
                            if len(satnames) <= 2:
                                text += ", ".join(satnames)
                            elif len(satnames) > 2:
                                # we need a newline here, since multi content lists don't support automtic line wrapping
                                text += ", ".join(satnames[:2]) + ",\n"
                                text += " " + ", ".join(satnames[2:])
                        elif nimConfig.diseqcMode.value in ("positioner", "positioner_select"):
                            text = {"positioner": _("Positioner"), "positioner_select": _("Positioner (selecting satellites)")}[nimConfig.diseqcMode.value]
                            text += ":"
                            if nimConfig.positionerMode.value == "usals":
                                text += "USALS"
                            elif nimConfig.positionerMode.value == "manual":
                                text += _("Manual")
                        else:
                            text = _("Simple")
                    elif nimConfig.configMode.value == "advanced":
                        text = _("Advanced")
                elif x.isCompatible("DVB-T") or x.isCompatible("DVB-C"):
                    if nimConfig.configMode.value == "nothing":
                        text = _("Nothing connected")
                    elif nimConfig.configMode.value == "enabled":
                        text = _("Enabled")
                if x.isMultiType():
                    text = _("Switchable tuner types:") + "(" + ','.join(x.getMultiTypeList().values()) + ")" + "\n" + text
                if not x.isSupported():
                    text = _("Tuner is not supported")
                self.list.append((slotid, x.friendly_full_description, text, x))
        self["nimlist"].setList(self.list)
        self["nimlist"].updateList(self.list)
        if index is not None:
            # Restore the previously focused row after a setup round-trip.
            self["nimlist"].setIndex(index)
class SelectSatsEntryScreen(Screen):
    """Checkbox list of all known satellites.

    Closes with str(list-of-orbital-positions) of the checked entries on
    save, or None on cancel.
    """
    skin = """
        <screen name="SelectSatsEntryScreen" position="center,center" size="560,410" title="Select Sats Entry" >
            <ePixmap name="red" position="0,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
            <ePixmap name="green" position="140,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
            <ePixmap name="yellow" position="280,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
            <ePixmap name="blue" position="420,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
            <widget name="key_red" position="0,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
            <widget name="key_green" position="140,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
            <widget name="key_yellow" position="280,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
            <widget name="key_blue" position="420,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
            <widget name="list" position="10,40" size="540,330" scrollbarMode="showNever" />
            <ePixmap pixmap="skin_default/div-h.png" position="0,375" zPosition="1" size="540,2" transparent="1" alphatest="on" />
            <widget name="hint" position="10,380" size="540,25" font="Regular;19" halign="center" transparent="1" />
        </screen>"""
    def __init__(self, session, userSatlist=[]):
        # NOTE(review): mutable default argument; harmless here because
        # userSatlist is only read, never mutated.
        Screen.__init__(self, session)
        self["key_red"] = Button(_("Cancel"))
        self["key_green"] = Button(_("Save"))
        self["key_yellow"] = Button(_("Sort by"))
        self["key_blue"] = Button(_("Select all"))
        self["hint"] = Label(_("Press OK to toggle the selection"))
        SatList = []
        for sat in nimmanager.getSatList():
            selected = False
            # userSatlist is a str(list) of orbital positions; substring
            # membership is used to pre-check entries.
            if isinstance(userSatlist, str) and str(sat[0]) in userSatlist:
                selected = True
            SatList.append((sat[0], sat[1], sat[2], selected))
        sat_list = [SelectionEntryComponent(x[1], x[0], x[2], x[3]) for x in SatList]
        self["list"] = SelectionList(sat_list, enableWrapAround=True)
        self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
        {
            "red": self.cancel,
            "green": self.save,
            "yellow": self.sortBy,
            "blue": self["list"].toggleAllSelection,
            "save": self.save,
            "cancel": self.cancel,
            "ok": self["list"].toggleSelection,
        }, -2)
        self.setTitle(_("Select satellites"))
    def save(self):
        # Collect the orbital positions of all checked entries.
        val = [x[0][1] for x in self["list"].list if x[0][3]]
        self.close(str(val))
    def cancel(self):
        self.close(None)
    def sortBy(self):
        """Offer the available sort orders for the list via a ChoiceBox."""
        lst = self["list"].list
        if len(lst) > 1:
            # NOTE(review): "Standart list" looks like a typo for
            # "Standard list", but it is a translatable msgid -- fixing
            # it here would orphan existing translations.
            menu = [(_("Reverse list"), "2"), (_("Standart list"), "1")]
            connected_sat = [x[0][1] for x in lst if x[0][3]]
            if len(connected_sat) > 0:
                menu.insert(0,(_("Connected satellites"), "3"))
            def sortAction(choice):
                if choice:
                    reverse_flag = False
                    sort_type = int(choice[1])
                    if choice[1] == "2":
                        sort_type = reverse_flag = 1
                    elif choice[1] == "3":
                        reverse_flag = not reverse_flag
                    self["list"].sort(sortType=sort_type, flag=reverse_flag)
                    self["list"].moveToIndex(0)
            self.session.openWithCallback(sortAction, ChoiceBox, title= _("Select sort method:"), list=menu)
| gpl-2.0 |
bbbbanjax/CloudBot | plugins/pagecheck.py | 35 | 1515 | import urllib.parse
from bs4 import BeautifulSoup
import requests
import requests.exceptions
from cloudbot import hook
@hook.command("down", "offline", "up")
def down(text):
    """<url> - checks if <url> is online or offline

    :type text: str
    """
    if "://" not in text:
        text = 'http://' + text
    # Reduce the URL to scheme + host so only the site root is probed.
    text = 'http://' + urllib.parse.urlparse(text).netloc
    try:
        # A timeout keeps the bot responsive when a host blackholes the
        # connection.  Catch the whole requests failure hierarchy (DNS,
        # SSL, timeout, ...) -- previously only ConnectionError was
        # handled, so e.g. a Timeout crashed the command.
        requests.get(text, timeout=10)
    except requests.exceptions.RequestException:
        return '{} seems to be down'.format(text)
    else:
        return '{} seems to be up'.format(text)
@hook.command()
def isup(text):
    """<url> - uses isup.me to check if <url> is online or offline

    :type text: str
    """
    url = text.strip()
    # slightly overcomplicated, esoteric URL parsing
    scheme, auth, path, query, fragment = urllib.parse.urlsplit(url)
    domain = auth or path
    try:
        # Bounded wait + full requests exception hierarchy (DNS, SSL,
        # timeout, ...); previously only ConnectionError was handled.
        response = requests.get('http://isup.me/' + domain, timeout=10)
    except requests.exceptions.RequestException:
        return "Failed to get status."
    if response.status_code != requests.codes.ok:
        return "Failed to get status."
    soup = BeautifulSoup(response.text, 'lxml')
    div = soup.find('div')
    if div is None:
        # Unexpected page layout -- previously raised AttributeError.
        return "Failed to get status."
    content = div.text.strip()
    if "not just you" in content:
        return "It's not just you. {} looks \x02\x034down\x02\x0f from here!".format(url)
    elif "is up" in content:
        return "It's just you. {} is \x02\x033up\x02\x0f.".format(url)
    else:
        return "Huh? That doesn't look like a site on the interweb."
| gpl-3.0 |
dharamgollapudi/jaikuengine | blob/views.py | 34 | 1380 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
from django import http
from django.conf import settings
from django.utils.http import http_date
from common.display import prep_stream_dict, prep_entry_list, prep_entry, prep_comment_list
from common import decorator
from common import exception
from common import api
@decorator.cache_forever
def blob_image_jpg(request, nick, path):
    """Serve a user's image blob as image/jpg with permanent caching.

    Returns 404 when the image is missing, 403 on an API error.
    """
    try:
        img = api.image_get(request.user, nick, path, format='jpg')
        if not img:
            return http.HttpResponseNotFound()
        content_type = "image/jpg"
        response = http.HttpResponse(content_type=content_type)
        response.write(img.content)
        return response
    except exception.ApiException, e:
        logging.info("exc %s", e)
        return http.HttpResponseForbidden()
    except Exception:
        # Deliberate best-effort: any other failure is reported as 404.
        return http.HttpResponseNotFound()
| apache-2.0 |
dashmug/saleor | saleor/checkout/test_checkout.py | 10 | 3047 | from django.contrib.auth.models import AnonymousUser
from django.test import TestCase
from mock import MagicMock, patch
from . import BillingAddressStep, ShippingStep
from ..checkout import STORAGE_SESSION_KEY
from ..checkout.steps import BaseAddressStep
from ..userprofile.models import Address
# Fixture: a complete, valid address POSTed by all checkout step tests.
NEW_ADDRESS = {
    'first_name': 'Test',
    'last_name': 'Test',
    'street_address_1': 'Test',
    'street_address_2': 'Test',
    'city': 'Test',
    'phone': '12345678',
    'postal_code': '987654',
    'country': 'PL',
    'country_area': '',
    'company_name': 'Test'}
def test_base_address_step_works(rf):
    """A POSTed, fully populated address validates in BaseAddressStep."""
    request = rf.post('/checkout/', NEW_ADDRESS)
    request.user = AnonymousUser()
    address = Address(**NEW_ADDRESS)
    step = BaseAddressStep(request, storage={}, address=address)
    assert step.forms_are_valid()
    assert step.address.first_name == 'Test'
def test_billing_address_save_without_address(rf):
    """Processing the billing step must stash the posted address in storage."""
    form_data = dict(NEW_ADDRESS, email='test@example.com')
    post_request = rf.post('/checkout/', form_data)
    post_request.user = AnonymousUser()
    storage = {}
    step = BillingAddressStep(post_request, storage)
    assert step.process() is None
    saved = storage['address']
    assert isinstance(saved, dict)
    assert saved['first_name'] == 'Test'
def test_billing_address_save_with_address_in_checkout(rf):
    """The billing form validates when storage already holds an address."""
    data = dict(NEW_ADDRESS, email='test@example.com')
    request = rf.post('/checkout/', data)
    request.user = AnonymousUser()
    storage = {'address': {}}
    step = BillingAddressStep(request, storage)
    assert step.forms_are_valid()
def test_shipping_step_save_without_address(rf):
    """Saving the shipping step stores the posted address as a dict."""
    data = dict(NEW_ADDRESS, method='dummy_shipping')
    request = rf.post('/checkout/', data)
    request.user = AnonymousUser()
    request.session = {STORAGE_SESSION_KEY: {}}
    group = MagicMock()
    group.address = None
    storage = {'address': NEW_ADDRESS.copy()}
    step = ShippingStep(request, storage, group)
    assert step.forms_are_valid()
    step.save()
    assert isinstance(storage['address'], dict)
def test_shipping_step_save_with_address_in_group(rf):
    """An address already on the delivery group survives a save unchanged."""
    data = dict(NEW_ADDRESS, method='dummy_shipping')
    request = rf.post('/checkout/', data)
    request.user = AnonymousUser()
    request.session = {}
    group = MagicMock()
    group.address = NEW_ADDRESS.copy()
    storage = {'address': NEW_ADDRESS.copy()}
    step = ShippingStep(request, storage, group)
    assert step.forms_are_valid()
    step.save()
    assert storage['address'] == NEW_ADDRESS
def test_shipping_step_save_with_address_in_checkout(rf):
    """A stale address in checkout storage is replaced by the posted one."""
    data = dict(NEW_ADDRESS, method='dummy_shipping')
    request = rf.post('/checkout/', data)
    request.user = AnonymousUser()
    request.session = {}
    group = MagicMock()
    group.address = None
    # Pre-existing (stale) address that the save should overwrite.
    storage = {
        'address': {
            'first_name': 'Change',
            'last_name': 'Me',
            'id': 10}}
    step = ShippingStep(request, storage, group)
    assert step.forms_are_valid()
    step.save()
    assert storage['address'] == NEW_ADDRESS
| bsd-3-clause |
orokusaki/django-jsonrpc-2-0 | jsonrpc/encoders.py | 1 | 1281 | import json
import decimal
from datetime import datetime, date, time
from django.db import models
from django.db.models.query import QuerySet
from django.core.serializers import serialize
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
class RobustEncoder(json.JSONEncoder):
    """
    JSON encoder with support for ``QuerySet``, ``Model``, ``Promise``,
    ``datetime``, ``date``, ``time``, and ``Decimal`` objects.
    """
    def default(self, obj):
        """
        Provides custom functionality for certain types, defaulting to the
        built-in encoder.
        """
        # QuerySet
        if isinstance(obj, QuerySet):
            # Round-trip through Django's JSON serializer so the result
            # is a plain list of dicts.
            return json.loads(serialize('json', obj))
        # Model
        if isinstance(obj, models.Model):
            # Serialize a single instance via a one-element list.
            return json.loads(serialize('json', [obj]))[0]
        # Promise (e.g., ``ugettext_lazy``), and Decimal both get unicoded
        if isinstance(obj, (Promise, decimal.Decimal)):
            return force_unicode(obj)
        # datetime, time, and date get isoformatted and unicoded
        if isinstance(obj, (datetime, time, date)):
            return unicode(obj.isoformat())
        # Anything else falls back to json.JSONEncoder (raises TypeError).
        return super(RobustEncoder, self).default(obj)
| bsd-2-clause |
dmwu/sparrow | deploy/third_party/boto-2.1.1/boto/rds/parametergroup.py | 19 | 7114 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class ParameterGroup(dict):
    """A named RDS parameter group: maps parameter name -> Parameter.

    Populated either by the XML SAX handlers (startElement/endElement)
    while parsing a DescribeDBParameters response, or manually via
    add_param.
    """

    def __init__(self, connection=None):
        dict.__init__(self)
        self.connection = connection
        self.name = None
        self.description = None
        self.engine = None
        # Parameter currently being filled in by the SAX parser.
        self._current_param = None

    def __repr__(self):
        return 'ParameterGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        if name == 'Parameter':
            # Close out the previous parameter before starting a new one.
            if self._current_param:
                self[self._current_param.name] = self._current_param
            self._current_param = Parameter(self)
            return self._current_param

    def endElement(self, name, value, connection):
        if name == 'DBParameterGroupName':
            self.name = value
        elif name == 'Description':
            self.description = value
        elif name == 'Engine':
            self.engine = value
        else:
            setattr(self, name, value)

    def modifiable(self):
        """Return a list of the parameters in this group that can be modified."""
        mod = []
        for key in self:
            p = self[key]
            if p.is_modifiable:
                mod.append(p)
        return mod

    def get_params(self):
        """Fetch this group's parameters from the service and merge them in."""
        pg = self.connection.get_all_dbparameters(self.name)
        self.update(pg)

    def add_param(self, name, value, apply_method):
        """Create a Parameter and register it in this group.

        Bug fix: the parameter is now stored in the group's dict (keyed
        by name); previously this appended to a non-existent self.params
        attribute, raising AttributeError.  The parameter's DataType is
        inferred from the Python type of `value` so the value property's
        validation works.
        """
        param = Parameter(self, name)
        if isinstance(value, bool):
            param.type = 'boolean'
        elif isinstance(value, (int, long)):
            param.type = 'integer'
        else:
            param.type = 'string'
        param.value = value
        param.apply_method = apply_method
        self[name] = param
class Parameter(object):
"""
Represents a RDS Parameter
"""
ValidTypes = {'integer' : int,
'string' : str,
'boolean' : bool}
ValidSources = ['user', 'system', 'engine-default']
ValidApplyTypes = ['static', 'dynamic']
ValidApplyMethods = ['immediate', 'pending-reboot']
def __init__(self, group=None, name=None):
self.group = group
self.name = name
self._value = None
self.type = str
self.source = None
self.is_modifiable = True
self.description = None
self.apply_method = None
self.allowed_values = None
def __repr__(self):
return 'Parameter:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ParameterName':
self.name = value
elif name == 'ParameterValue':
self._value = value
elif name == 'DataType':
if value in self.ValidTypes:
self.type = value
elif name == 'Source':
if value in self.ValidSources:
self.source = value
elif name == 'IsModifiable':
if value.lower() == 'true':
self.is_modifiable = True
else:
self.is_modifiable = False
elif name == 'Description':
self.description = value
elif name == 'ApplyType':
if value in self.ValidApplyTypes:
self.apply_type = value
elif name == 'AllowedValues':
self.allowed_values = value
else:
setattr(self, name, value)
def merge(self, d, i):
prefix = 'Parameters.member.%d.' % i
if self.name:
d[prefix+'ParameterName'] = self.name
if self._value:
d[prefix+'ParameterValue'] = self._value
if self.apply_type:
d[prefix+'ApplyMethod'] = self.apply_method
def _set_string_value(self, value):
if not isinstance(value, str) or isinstance(value, unicode):
raise ValueError, 'value must be of type str'
if self.allowed_values:
choices = self.allowed_values.split(',')
if value not in choices:
raise ValueError, 'value must be in %s' % self.allowed_values
self._value = value
def _set_integer_value(self, value):
if isinstance(value, str) or isinstance(value, unicode):
value = int(value)
if isinstance(value, int) or isinstance(value, long):
if self.allowed_values:
min, max = self.allowed_values.split('-')
if value < int(min) or value > int(max):
raise ValueError, 'range is %s' % self.allowed_values
self._value = value
else:
raise ValueError, 'value must be integer'
def _set_boolean_value(self, value):
if isinstance(value, bool):
self._value = value
elif isinstance(value, str) or isinstance(value, unicode):
if value.lower() == 'true':
self._value = True
else:
self._value = False
else:
raise ValueError, 'value must be boolean'
def set_value(self, value):
if self.type == 'string':
self._set_string_value(value)
elif self.type == 'integer':
self._set_integer_value(value)
elif self.type == 'boolean':
self._set_boolean_value(value)
else:
raise TypeError, 'unknown type (%s)' % self.type
def get_value(self):
if self._value == None:
return self._value
if self.type == 'string':
return self._value
elif self.type == 'integer':
if not isinstance(self._value, int) and not isinstance(self._value, long):
self._set_integer_value(self._value)
return self._value
elif self.type == 'boolean':
if not isinstance(self._value, bool):
self._set_boolean_value(self._value)
return self._value
else:
raise TypeError, 'unknown type (%s)' % self.type
value = property(get_value, set_value, 'The value of the parameter')
def apply(self, immediate=False):
if immediate:
self.apply_method = 'immediate'
else:
self.apply_method = 'pending-reboot'
self.group.connection.modify_parameter_group(self.group.name, [self])
| apache-2.0 |
google-code-export/marinemap | lingcod/straightline_spacing/models.py | 3 | 3107 | from django.contrib.gis.db import models
from django.conf import settings
from django.contrib.gis.measure import A, D
from lingcod.unit_converter.models import length_in_display_units, area_in_display_units
class SpacingPoint(models.Model):
    """A named reference point that is always included in spacing
    (distance-matrix) calculations."""
    name = models.CharField(max_length=200)
    # Stored in the project's database projection.
    geometry = models.PointField(srid=settings.GEOMETRY_DB_SRID)
    objects = models.GeoManager()
    def __unicode__(self):
        return unicode(self.name)
def all_spacing_points_dict():
    """
    Returns a dictionary of the form: { point: 'name' } for all objects in SpacingPoint
    """
    result = {}
    for spacing_point in SpacingPoint.objects.all():
        result[spacing_point.geometry] = spacing_point.name
    return result
def add_all_spacing_points(in_dict):
    """
    Takes a dictionary of the form: { point: 'name' }, and adds all the objects in SpacingPoint
    """
    # Merge every stored spacing point into the caller's dict in place.
    for point, name in all_spacing_points_dict().iteritems():
        in_dict[point] = name
    return in_dict
def distance_row_dict(from_dict, to_dict):
    """
    from_dict will be a dict with a point as the key and a label as the value.
    to_dict will be of the same format with multiple entries.
    will return a dictionary with points as keys and a dictionary as values.
    NOTE: This method assumes that the projection units are meters.
    """
    from_pnt = from_dict.keys()[0]
    # NOTE(review): this mutates the caller's to_dict and re-queries all
    # SpacingPoint rows itself instead of reusing add_all_spacing_points().
    for s_pnt in SpacingPoint.objects.all():
        to_dict.update({s_pnt.geometry:s_pnt.name})
    result = {}
    for point, pnt_label in to_dict.iteritems():
        result[point] = {
            'label': pnt_label,
            # Distance converted from projection meters to display units.
            'distance': length_in_display_units(point.distance(from_pnt)),
            # Northing, used later for north-to-south ordering.
            'sort': point.y
        }
    return result
def distance_row_list(from_pnt, to_list):
    """
    Distances (in display units) from from_pnt to each point in to_list.
    NOTE: This method assumes that the projection units are meters.
    """
    return [length_in_display_units(pnt.distance(from_pnt)) for pnt in to_list]
def distance_matrix(point_list):
    """Square matrix of pairwise distances between the given points."""
    return [distance_row_list(pnt, point_list) for pnt in point_list]
def sorted_points_and_labels(in_dict):
    """
    in_dict will look like:
    { point: 'name' }

    sorted_points, sorted_labels (both lists) will be returned in a dictionary and they'll be
    ordered from North to South.
    """
    sorted_points = []
    sorted_labels = []
    # Index points by their y coordinate (northing) for ordering.
    # NOTE(review): two points sharing the same y silently overwrite
    # each other here -- confirm that's acceptable for the expected data.
    y_dict = {}
    for point, name in in_dict.iteritems():
        y_dict.update( { point.y: point } )
    y_list = y_dict.keys()
    y_list.sort()
    # Largest y (northernmost) first.
    for y in reversed(y_list):
        sorted_points.append(y_dict[y])
        sorted_labels.append(in_dict[y_dict[y]])
    return { 'points': sorted_points, 'labels': sorted_labels }
def distance_matrix_and_labels(in_dict,add_spacing_points=True):
"""
in_dict will look like:
{ point: 'name' }
Will return a dictionary with the keys 'labels' and 'matrix'
"""
if add_spacing_points:
in_dict = add_all_spacing_points(in_dict)
spl_dict = sorted_points_and_labels(in_dict)
dist_mat = distance_matrix(spl_dict['points'])
return { 'labels': spl_dict['labels'], 'matrix': dist_mat } | bsd-3-clause |
foursquare/commons-old | tests/python/twitter/common/config/properties_test.py | 15 | 2563 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'John Sirois'
import unittest
from twitter.common.collections import OrderedDict
from twitter.common.config import Properties
from twitter.common.contextutil import temporary_file
from twitter.common.lang import Compatibility
class PropertiesTest(unittest.TestCase):
    """Tests for Properties.load/dump handling of java-style .properties
    content (comments, key/value separators, line continuations)."""

    def test_empty(self):
        self.assertLoaded('', {})
        self.assertLoaded(' ', {})
        self.assertLoaded('\t', {})
        self.assertLoaded('''
        ''', {})

    def test_comments(self):
        # Lines starting with '#' or '!' are comments.
        self.assertLoaded('''
        # not=a prop
        a=prop
        ! more non prop
        ''', {'a': 'prop'})

    def test_kv_sep(self):
        # Separators: '=', ':' or whitespace; escaped separators belong
        # to the key or value; keys with no separator map to ''.
        self.assertLoaded('''
        a=b
        c d\=
        e\: :f
        jack spratt = \tbob barker
        g
        h=
        i :
        ''', {'a': 'b', 'c': 'd=', 'e:': 'f', 'jack spratt': 'bob barker', 'g': '', 'h': '', 'i': ''})

    def test_line_continuation(self):
        self.assertLoaded('''
        # A 3 line continuation
        a\\\\
        \\
        \\b
        c=\
        d
        e: \
        f
        g\
        :h
        i\
        = j
        ''', {'a\\': '\\b', 'c': 'd', 'e': 'f', 'g': 'h', 'i': 'j'})

    def test_stream(self):
        # load() must also accept an open file object, not just a string.
        with temporary_file() as props_out:
            props_out.write('''
            it's a = file
            ''')
            props_out.close()
            with open(props_out.name, 'r') as props_in:
                self.assertLoaded(props_in, {'it\'s a': 'file'})

    def assertLoaded(self, contents, expected):
        self.assertEquals(expected, Properties.load(contents))

    def test_dump(self):
        props = OrderedDict()
        props['a'] = 1
        props['b'] = '''2
'''
        props['c'] =' 3 : ='
        out = Compatibility.StringIO()
        Properties.dump(props, out)
        # Newlines, spaces, ':' and '=' must all be escaped on output.
        self.assertEquals('a=1\nb=2\\\n\nc=\\ 3\\ \\:\\ \\=\n', out.getvalue())
| apache-2.0 |
mcking49/apache-flask | Python/Lib/test/test_math.py | 45 | 44490 | # Python test set -- math module
# XXXX Should not do tests around zero only
from test.test_support import run_unittest, verbose
import unittest
import math
import os
import sys
import random
import struct
# Tolerance for ftest()-style approximate comparisons.
eps = 1E-05
NAN = float('nan')
INF = float('inf')
NINF = float('-inf')

# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
    float.__getformat__("double").startswith("IEEE"),
    "test requires IEEE 754 doubles")

# detect evidence of double-rounding: fsum is not always correctly
# rounded on machines that suffer from double rounding.
x, y = 1e16, 2.9999  # use temporary values to defeat peephole optimizer
HAVE_DOUBLE_ROUNDING = (x + y == 1e16 + 4)

# locate file with test values
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
test_dir = os.path.dirname(file) or os.curdir
math_testcases = os.path.join(test_dir, 'math_testcases.txt')
test_file = os.path.join(test_dir, 'cmath_testcases.txt')
def to_ulps(x):
    """Convert a non-NaN float x to an integer, in such a way that
    adjacent floats are converted to adjacent integers.  Then
    abs(ulps(x) - ulps(y)) gives the difference in ulps between two
    floats.

    The results from this function will only make sense on platforms
    where C doubles are represented in IEEE 754 binary64 format.
    """
    (bits,) = struct.unpack('<q', struct.pack('<d', x))
    if bits >= 0:
        return bits
    # Sign bit set: remap so integer ordering matches float ordering.
    return ~(bits + 2**63)
def ulps_check(expected, got, ulps=20):
    """Given non-NaN floats `expected` and `got`,
    check that they're equal to within the given number of ulps.

    Returns None on success and an error message on failure."""
    diff = to_ulps(got) - to_ulps(expected)
    if abs(diff) > ulps:
        return "error = {} ulps; permitted error = {} ulps".format(diff, ulps)
    return None
def acc_check(expected, got, rel_err=2e-15, abs_err = 5e-323):
    """Determine whether non-NaN floats a and b are equal to within a
    (small) rounding error.  The default values for rel_err and
    abs_err are chosen to be suitable for platforms where a float is
    represented by an IEEE 754 double.  They allow an error of between
    9 and 19 ulps."""
    # Equal infinities are exact matches; subtracting them gives nan.
    if math.isinf(expected) and got == expected:
        return None
    allowed = max(abs_err, rel_err * abs(expected))
    diff = got - expected
    if abs(diff) >= allowed:
        return "error = {}; permitted error = {}".format(diff, allowed)
    return None
def parse_mtestfile(fname):
    """Parse a file with test values

    -- starts a comment
    blank lines, or lines containing only a comment, are ignored
    other lines are expected to have the form
      id fn arg -> expected [flag]*
    """
    with open(fname) as fp:
        for raw in fp:
            # Drop any trailing '--' comment, then skip what's left if blank.
            line = raw[:raw.index('--')] if '--' in raw else raw
            if not line.strip():
                continue
            lhs, rhs = line.split('->')
            case_id, fn, arg = lhs.split()
            pieces = rhs.split()
            # First token on the right is the expected value; the rest
            # are flags such as 'ignore-sign'.
            yield (case_id, fn, float(arg), float(pieces[0]), pieces[1:])
def parse_testfile(fname):
    """Parse a file with test values

    Empty lines or lines starting with -- are ignored
    yields id, fn, arg_real, arg_imag, exp_real, exp_imag
    """
    with open(fname) as fp:
        for line in fp:
            # Whole-line comments and blank lines carry no test case.
            if line.startswith('--') or not line.strip():
                continue
            lhs, rhs = line.split('->')
            case_id, fn, arg_real, arg_imag = lhs.split()
            pieces = rhs.split()
            # Right-hand side: real part, imaginary part, then flags.
            yield (case_id, fn,
                   float(arg_real), float(arg_imag),
                   float(pieces[0]), float(pieces[1]),
                   pieces[2:])
class MathTests(unittest.TestCase):
def ftest(self, name, value, expected):
if abs(value-expected) > eps:
# Use %r instead of %f so the error message
# displays full precision. Otherwise discrepancies
# in the last few bits will lead to very confusing
# error messages
self.fail('%s returned %r, expected %r' %
(name, value, expected))
def testConstants(self):
self.ftest('pi', math.pi, 3.1415926)
self.ftest('e', math.e, 2.7182818)
def testAcos(self):
self.assertRaises(TypeError, math.acos)
self.ftest('acos(-1)', math.acos(-1), math.pi)
self.ftest('acos(0)', math.acos(0), math.pi/2)
self.ftest('acos(1)', math.acos(1), 0)
self.assertRaises(ValueError, math.acos, INF)
self.assertRaises(ValueError, math.acos, NINF)
self.assertTrue(math.isnan(math.acos(NAN)))
def testAcosh(self):
self.assertRaises(TypeError, math.acosh)
self.ftest('acosh(1)', math.acosh(1), 0)
self.ftest('acosh(2)', math.acosh(2), 1.3169578969248168)
self.assertRaises(ValueError, math.acosh, 0)
self.assertRaises(ValueError, math.acosh, -1)
self.assertEqual(math.acosh(INF), INF)
self.assertRaises(ValueError, math.acosh, NINF)
self.assertTrue(math.isnan(math.acosh(NAN)))
def testAsin(self):
self.assertRaises(TypeError, math.asin)
self.ftest('asin(-1)', math.asin(-1), -math.pi/2)
self.ftest('asin(0)', math.asin(0), 0)
self.ftest('asin(1)', math.asin(1), math.pi/2)
self.assertRaises(ValueError, math.asin, INF)
self.assertRaises(ValueError, math.asin, NINF)
self.assertTrue(math.isnan(math.asin(NAN)))
def testAsinh(self):
self.assertRaises(TypeError, math.asinh)
self.ftest('asinh(0)', math.asinh(0), 0)
self.ftest('asinh(1)', math.asinh(1), 0.88137358701954305)
self.ftest('asinh(-1)', math.asinh(-1), -0.88137358701954305)
self.assertEqual(math.asinh(INF), INF)
self.assertEqual(math.asinh(NINF), NINF)
self.assertTrue(math.isnan(math.asinh(NAN)))
def testAtan(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atan(-1)', math.atan(-1), -math.pi/4)
self.ftest('atan(0)', math.atan(0), 0)
self.ftest('atan(1)', math.atan(1), math.pi/4)
self.ftest('atan(inf)', math.atan(INF), math.pi/2)
self.ftest('atan(-inf)', math.atan(NINF), -math.pi/2)
self.assertTrue(math.isnan(math.atan(NAN)))
def testAtanh(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atanh(0)', math.atanh(0), 0)
self.ftest('atanh(0.5)', math.atanh(0.5), 0.54930614433405489)
self.ftest('atanh(-0.5)', math.atanh(-0.5), -0.54930614433405489)
self.assertRaises(ValueError, math.atanh, 1)
self.assertRaises(ValueError, math.atanh, -1)
self.assertRaises(ValueError, math.atanh, INF)
self.assertRaises(ValueError, math.atanh, NINF)
self.assertTrue(math.isnan(math.atanh(NAN)))
    def testAtan2(self):
        """Exhaustive special-value checks for atan2(y, x).

        Covers all combinations of signed zeros, infinities, finite
        values and NaN, per IEEE 754 / C99 Annex F semantics.
        """
        self.assertRaises(TypeError, math.atan2)
        self.ftest('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
        self.ftest('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
        self.ftest('atan2(0, 1)', math.atan2(0, 1), 0)
        self.ftest('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
        self.ftest('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
        # math.atan2(0, x)
        self.ftest('atan2(0., -inf)', math.atan2(0., NINF), math.pi)
        self.ftest('atan2(0., -2.3)', math.atan2(0., -2.3), math.pi)
        self.ftest('atan2(0., -0.)', math.atan2(0., -0.), math.pi)
        self.assertEqual(math.atan2(0., 0.), 0.)
        self.assertEqual(math.atan2(0., 2.3), 0.)
        self.assertEqual(math.atan2(0., INF), 0.)
        self.assertTrue(math.isnan(math.atan2(0., NAN)))
        # math.atan2(-0, x) -- the sign of the zero selects the branch.
        self.ftest('atan2(-0., -inf)', math.atan2(-0., NINF), -math.pi)
        self.ftest('atan2(-0., -2.3)', math.atan2(-0., -2.3), -math.pi)
        self.ftest('atan2(-0., -0.)', math.atan2(-0., -0.), -math.pi)
        self.assertEqual(math.atan2(-0., 0.), -0.)
        self.assertEqual(math.atan2(-0., 2.3), -0.)
        self.assertEqual(math.atan2(-0., INF), -0.)
        self.assertTrue(math.isnan(math.atan2(-0., NAN)))
        # math.atan2(INF, x)
        self.ftest('atan2(inf, -inf)', math.atan2(INF, NINF), math.pi*3/4)
        self.ftest('atan2(inf, -2.3)', math.atan2(INF, -2.3), math.pi/2)
        self.ftest('atan2(inf, -0.)', math.atan2(INF, -0.0), math.pi/2)
        self.ftest('atan2(inf, 0.)', math.atan2(INF, 0.0), math.pi/2)
        self.ftest('atan2(inf, 2.3)', math.atan2(INF, 2.3), math.pi/2)
        self.ftest('atan2(inf, inf)', math.atan2(INF, INF), math.pi/4)
        self.assertTrue(math.isnan(math.atan2(INF, NAN)))
        # math.atan2(NINF, x)
        self.ftest('atan2(-inf, -inf)', math.atan2(NINF, NINF), -math.pi*3/4)
        self.ftest('atan2(-inf, -2.3)', math.atan2(NINF, -2.3), -math.pi/2)
        self.ftest('atan2(-inf, -0.)', math.atan2(NINF, -0.0), -math.pi/2)
        self.ftest('atan2(-inf, 0.)', math.atan2(NINF, 0.0), -math.pi/2)
        self.ftest('atan2(-inf, 2.3)', math.atan2(NINF, 2.3), -math.pi/2)
        self.ftest('atan2(-inf, inf)', math.atan2(NINF, INF), -math.pi/4)
        self.assertTrue(math.isnan(math.atan2(NINF, NAN)))
        # math.atan2(+finite, x)
        self.ftest('atan2(2.3, -inf)', math.atan2(2.3, NINF), math.pi)
        self.ftest('atan2(2.3, -0.)', math.atan2(2.3, -0.), math.pi/2)
        self.ftest('atan2(2.3, 0.)', math.atan2(2.3, 0.), math.pi/2)
        self.assertEqual(math.atan2(2.3, INF), 0.)
        self.assertTrue(math.isnan(math.atan2(2.3, NAN)))
        # math.atan2(-finite, x)
        self.ftest('atan2(-2.3, -inf)', math.atan2(-2.3, NINF), -math.pi)
        self.ftest('atan2(-2.3, -0.)', math.atan2(-2.3, -0.), -math.pi/2)
        self.ftest('atan2(-2.3, 0.)', math.atan2(-2.3, 0.), -math.pi/2)
        self.assertEqual(math.atan2(-2.3, INF), -0.)
        self.assertTrue(math.isnan(math.atan2(-2.3, NAN)))
        # math.atan2(NAN, x) -- NaN in the first slot always propagates.
        self.assertTrue(math.isnan(math.atan2(NAN, NINF)))
        self.assertTrue(math.isnan(math.atan2(NAN, -2.3)))
        self.assertTrue(math.isnan(math.atan2(NAN, -0.)))
        self.assertTrue(math.isnan(math.atan2(NAN, 0.)))
        self.assertTrue(math.isnan(math.atan2(NAN, 2.3)))
        self.assertTrue(math.isnan(math.atan2(NAN, INF)))
        self.assertTrue(math.isnan(math.atan2(NAN, NAN)))
    def testCeil(self):
        """ceil rounds toward positive infinity and defers to __float__.

        NOTE: Python 2 code -- the 1L literal below is not valid Python 3.
        """
        self.assertRaises(TypeError, math.ceil)
        # These types will be int in py3k.
        self.assertEqual(float, type(math.ceil(1)))
        self.assertEqual(float, type(math.ceil(1L)))
        self.assertEqual(float, type(math.ceil(1.0)))
        self.ftest('ceil(0.5)', math.ceil(0.5), 1)
        self.ftest('ceil(1.0)', math.ceil(1.0), 1)
        self.ftest('ceil(1.5)', math.ceil(1.5), 2)
        self.ftest('ceil(-0.5)', math.ceil(-0.5), 0)
        self.ftest('ceil(-1.0)', math.ceil(-1.0), -1)
        self.ftest('ceil(-1.5)', math.ceil(-1.5), -1)
        self.assertEqual(math.ceil(INF), INF)
        self.assertEqual(math.ceil(NINF), NINF)
        self.assertTrue(math.isnan(math.ceil(NAN)))
        class TestCeil(object):
            # Supplies only __float__; math.ceil should convert through it.
            def __float__(self):
                return 41.3
        class TestNoCeil(object):
            pass
        self.ftest('ceil(TestCeil())', math.ceil(TestCeil()), 42)
        self.assertRaises(TypeError, math.ceil, TestNoCeil())
        t = TestNoCeil()
        t.__ceil__ = lambda *args: args
        # A __ceil__ set on the instance (not the type) must be ignored.
        self.assertRaises(TypeError, math.ceil, t)
        self.assertRaises(TypeError, math.ceil, t, 0)
@requires_IEEE_754
def testCopysign(self):
self.assertEqual(math.copysign(1, 42), 1.0)
self.assertEqual(math.copysign(0., 42), 0.0)
self.assertEqual(math.copysign(1., -42), -1.0)
self.assertEqual(math.copysign(3, 0.), 3.0)
self.assertEqual(math.copysign(4., -0.), -4.0)
self.assertRaises(TypeError, math.copysign)
# copysign should let us distinguish signs of zeros
self.assertEqual(math.copysign(1., 0.), 1.)
self.assertEqual(math.copysign(1., -0.), -1.)
self.assertEqual(math.copysign(INF, 0.), INF)
self.assertEqual(math.copysign(INF, -0.), NINF)
self.assertEqual(math.copysign(NINF, 0.), INF)
self.assertEqual(math.copysign(NINF, -0.), NINF)
# and of infinities
self.assertEqual(math.copysign(1., INF), 1.)
self.assertEqual(math.copysign(1., NINF), -1.)
self.assertEqual(math.copysign(INF, INF), INF)
self.assertEqual(math.copysign(INF, NINF), NINF)
self.assertEqual(math.copysign(NINF, INF), INF)
self.assertEqual(math.copysign(NINF, NINF), NINF)
self.assertTrue(math.isnan(math.copysign(NAN, 1.)))
self.assertTrue(math.isnan(math.copysign(NAN, INF)))
self.assertTrue(math.isnan(math.copysign(NAN, NINF)))
self.assertTrue(math.isnan(math.copysign(NAN, NAN)))
# copysign(INF, NAN) may be INF or it may be NINF, since
# we don't know whether the sign bit of NAN is set on any
# given platform.
self.assertTrue(math.isinf(math.copysign(INF, NAN)))
# similarly, copysign(2., NAN) could be 2. or -2.
self.assertEqual(abs(math.copysign(2., NAN)), 2.)
def testCos(self):
self.assertRaises(TypeError, math.cos)
self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0)
self.ftest('cos(0)', math.cos(0), 1)
self.ftest('cos(pi/2)', math.cos(math.pi/2), 0)
self.ftest('cos(pi)', math.cos(math.pi), -1)
try:
self.assertTrue(math.isnan(math.cos(INF)))
self.assertTrue(math.isnan(math.cos(NINF)))
except ValueError:
self.assertRaises(ValueError, math.cos, INF)
self.assertRaises(ValueError, math.cos, NINF)
self.assertTrue(math.isnan(math.cos(NAN)))
def testCosh(self):
self.assertRaises(TypeError, math.cosh)
self.ftest('cosh(0)', math.cosh(0), 1)
self.ftest('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
self.assertEqual(math.cosh(INF), INF)
self.assertEqual(math.cosh(NINF), INF)
self.assertTrue(math.isnan(math.cosh(NAN)))
def testDegrees(self):
self.assertRaises(TypeError, math.degrees)
self.ftest('degrees(pi)', math.degrees(math.pi), 180.0)
self.ftest('degrees(pi/2)', math.degrees(math.pi/2), 90.0)
self.ftest('degrees(-pi/4)', math.degrees(-math.pi/4), -45.0)
def testExp(self):
self.assertRaises(TypeError, math.exp)
self.ftest('exp(-1)', math.exp(-1), 1/math.e)
self.ftest('exp(0)', math.exp(0), 1)
self.ftest('exp(1)', math.exp(1), math.e)
self.assertEqual(math.exp(INF), INF)
self.assertEqual(math.exp(NINF), 0.)
self.assertTrue(math.isnan(math.exp(NAN)))
def testFabs(self):
self.assertRaises(TypeError, math.fabs)
self.ftest('fabs(-1)', math.fabs(-1), 1)
self.ftest('fabs(0)', math.fabs(0), 0)
self.ftest('fabs(1)', math.fabs(1), 1)
    def testFactorial(self):
        """factorial of assorted values must match a naive reference product.

        NOTE: Python 2 code -- relies on range() returning a list and on
        the long type.
        """
        def fact(n):
            # Naive O(n) reference implementation.
            result = 1
            for i in range(1, int(n)+1):
                result *= i
            return result
        values = range(10) + [50, 100, 500]
        # Shuffle so any order-dependent caching bug would surface.
        random.shuffle(values)
        for x in values:
            for cast in (int, long, float):
                # The result must not depend on the argument's numeric type.
                self.assertEqual(math.factorial(cast(x)), fact(x), (x, fact(x), math.factorial(x)))
        self.assertRaises(ValueError, math.factorial, -1)
        self.assertRaises(ValueError, math.factorial, math.pi)
def testFloor(self):
self.assertRaises(TypeError, math.floor)
# These types will be int in py3k.
self.assertEqual(float, type(math.floor(1)))
self.assertEqual(float, type(math.floor(1L)))
self.assertEqual(float, type(math.floor(1.0)))
self.ftest('floor(0.5)', math.floor(0.5), 0)
self.ftest('floor(1.0)', math.floor(1.0), 1)
self.ftest('floor(1.5)', math.floor(1.5), 1)
self.ftest('floor(-0.5)', math.floor(-0.5), -1)
self.ftest('floor(-1.0)', math.floor(-1.0), -1)
self.ftest('floor(-1.5)', math.floor(-1.5), -2)
# pow() relies on floor() to check for integers
# This fails on some platforms - so check it here
self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
self.assertEqual(math.ceil(INF), INF)
self.assertEqual(math.ceil(NINF), NINF)
self.assertTrue(math.isnan(math.floor(NAN)))
class TestFloor(object):
def __float__(self):
return 42.3
class TestNoFloor(object):
pass
self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
self.assertRaises(TypeError, math.floor, TestNoFloor())
t = TestNoFloor()
t.__floor__ = lambda *args: args
self.assertRaises(TypeError, math.floor, t)
self.assertRaises(TypeError, math.floor, t, 0)
def testFmod(self):
self.assertRaises(TypeError, math.fmod)
self.ftest('fmod(10,1)', math.fmod(10,1), 0)
self.ftest('fmod(10,0.5)', math.fmod(10,0.5), 0)
self.ftest('fmod(10,1.5)', math.fmod(10,1.5), 1)
self.ftest('fmod(-10,1)', math.fmod(-10,1), 0)
self.ftest('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
self.ftest('fmod(-10,1.5)', math.fmod(-10,1.5), -1)
self.assertTrue(math.isnan(math.fmod(NAN, 1.)))
self.assertTrue(math.isnan(math.fmod(1., NAN)))
self.assertTrue(math.isnan(math.fmod(NAN, NAN)))
self.assertRaises(ValueError, math.fmod, 1., 0.)
self.assertRaises(ValueError, math.fmod, INF, 1.)
self.assertRaises(ValueError, math.fmod, NINF, 1.)
self.assertRaises(ValueError, math.fmod, INF, 0.)
self.assertEqual(math.fmod(3.0, INF), 3.0)
self.assertEqual(math.fmod(-3.0, INF), -3.0)
self.assertEqual(math.fmod(3.0, NINF), 3.0)
self.assertEqual(math.fmod(-3.0, NINF), -3.0)
self.assertEqual(math.fmod(0.0, 3.0), 0.0)
self.assertEqual(math.fmod(0.0, NINF), 0.0)
def testFrexp(self):
self.assertRaises(TypeError, math.frexp)
def testfrexp(name, result, expected):
(mant, exp), (emant, eexp) = result, expected
if abs(mant-emant) > eps or exp != eexp:
self.fail('%s returned %r, expected %r'%\
(name, (mant, exp), (emant,eexp)))
testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
testfrexp('frexp(0)', math.frexp(0), (0, 0))
testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
self.assertEqual(math.frexp(INF)[0], INF)
self.assertEqual(math.frexp(NINF)[0], NINF)
self.assertTrue(math.isnan(math.frexp(NAN)[0]))
    @requires_IEEE_754
    @unittest.skipIf(HAVE_DOUBLE_ROUNDING,
                     "fsum is not exact on machines with double rounding")
    def testFsum(self):
        """math.fsum must be exactly rounded; compare against integer msum.

        NOTE: Python 2 code -- uses xrange in the randomized loop below.
        """
        # math.fsum relies on exact rounding for correct operation.
        # There's a known problem with IA32 floating-point that causes
        # inexact rounding in some situations, and will cause the
        # math.fsum tests below to fail; see issue #2937.  On non IEEE
        # 754 platforms, and on IEEE 754 platforms that exhibit the
        # problem described in issue #2937, we simply skip the whole
        # test.
        # Python version of math.fsum, for comparison.  Uses a
        # different algorithm based on frexp, ldexp and integer
        # arithmetic.
        from sys import float_info
        mant_dig = float_info.mant_dig
        etiny = float_info.min_exp - mant_dig
        def msum(iterable):
            """Full precision summation.  Compute sum(iterable) without any
            intermediate accumulation of error.  Based on the 'lsum' function
            at http://code.activestate.com/recipes/393090/
            """
            # Accumulate the exact sum as an integer mantissa/exponent pair.
            tmant, texp = 0, 0
            for x in iterable:
                mant, exp = math.frexp(x)
                mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig
                if texp > exp:
                    tmant <<= texp-exp
                    texp = exp
                else:
                    mant <<= exp-texp
                tmant += mant
            # Round tmant * 2**texp to a float.  The original recipe
            # used float(str(tmant)) * 2.0**texp for this, but that's
            # a little unsafe because str -> float conversion can't be
            # relied upon to do correct rounding on all platforms.
            tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp)
            if tail > 0:
                # Round-half-to-even on the discarded tail bits.
                h = 1 << (tail-1)
                tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1)
                texp += tail
            return math.ldexp(tmant, texp)
        # Hand-picked cases with exactly known correctly-rounded sums.
        test_values = [
            ([], 0.0),
            ([0.0], 0.0),
            ([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100),
            ([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0),
            ([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0),
            ([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0),
            ([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0),
            ([1./n for n in range(1, 1001)],
             float.fromhex('0x1.df11f45f4e61ap+2')),
            ([(-1.)**n/n for n in range(1, 1001)],
             float.fromhex('-0x1.62a2af1bd3624p-1')),
            ([1.7**(i+1)-1.7**i for i in range(1000)] + [-1.7**1000], -1.0),
            ([1e16, 1., 1e-16], 10000000000000002.0),
            ([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0),
            # exercise code for resizing partials array
            ([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
             [-2.**1022],
             float.fromhex('0x1.5555555555555p+970')),
            ]
        for i, (vals, expected) in enumerate(test_values):
            try:
                actual = math.fsum(vals)
            except OverflowError:
                self.fail("test %d failed: got OverflowError, expected %r "
                          "for math.fsum(%.100r)" % (i, expected, vals))
            except ValueError:
                self.fail("test %d failed: got ValueError, expected %r "
                          "for math.fsum(%.100r)" % (i, expected, vals))
            self.assertEqual(actual, expected)
        # Randomized stress test: fsum must agree with the exact msum.
        from random import random, gauss, shuffle
        for j in xrange(1000):
            vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10
            s = 0
            for i in xrange(200):
                # Each term cancels the running sum, producing values of
                # wildly different magnitudes.
                v = gauss(0, random()) ** 7 - s
                s += v
                vals.append(v)
            shuffle(vals)
            # NOTE(review): this assignment is unused; the comparison below
            # recomputes msum(vals).
            s = msum(vals)
            self.assertEqual(msum(vals), math.fsum(vals))
def testHypot(self):
self.assertRaises(TypeError, math.hypot)
self.ftest('hypot(0,0)', math.hypot(0,0), 0)
self.ftest('hypot(3,4)', math.hypot(3,4), 5)
self.assertEqual(math.hypot(NAN, INF), INF)
self.assertEqual(math.hypot(INF, NAN), INF)
self.assertEqual(math.hypot(NAN, NINF), INF)
self.assertEqual(math.hypot(NINF, NAN), INF)
self.assertTrue(math.isnan(math.hypot(1.0, NAN)))
self.assertTrue(math.isnan(math.hypot(NAN, -2.0)))
    def testLdexp(self):
        """ldexp(x, n) == x * 2**n with IEEE special-value handling.

        NOTE: Python 2 code -- the 10L literals below are not valid Python 3.
        """
        self.assertRaises(TypeError, math.ldexp)
        self.ftest('ldexp(0,1)', math.ldexp(0,1), 0)
        self.ftest('ldexp(1,1)', math.ldexp(1,1), 2)
        self.ftest('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
        self.ftest('ldexp(-1,1)', math.ldexp(-1,1), -2)
        self.assertRaises(OverflowError, math.ldexp, 1., 1000000)
        self.assertRaises(OverflowError, math.ldexp, -1., 1000000)
        # Huge negative exponents underflow to a correctly signed zero.
        self.assertEqual(math.ldexp(1., -1000000), 0.)
        self.assertEqual(math.ldexp(-1., -1000000), -0.)
        self.assertEqual(math.ldexp(INF, 30), INF)
        self.assertEqual(math.ldexp(NINF, -213), NINF)
        self.assertTrue(math.isnan(math.ldexp(NAN, 0)))
        # large second argument
        for n in [10**5, 10L**5, 10**10, 10L**10, 10**20, 10**40]:
            # Exponents too large for C long must still behave sensibly.
            self.assertEqual(math.ldexp(INF, -n), INF)
            self.assertEqual(math.ldexp(NINF, -n), NINF)
            self.assertEqual(math.ldexp(1., -n), 0.)
            self.assertEqual(math.ldexp(-1., -n), -0.)
            self.assertEqual(math.ldexp(0., -n), 0.)
            self.assertEqual(math.ldexp(-0., -n), -0.)
            self.assertTrue(math.isnan(math.ldexp(NAN, -n)))
            self.assertRaises(OverflowError, math.ldexp, 1., n)
            self.assertRaises(OverflowError, math.ldexp, -1., n)
            self.assertEqual(math.ldexp(0., n), 0.)
            self.assertEqual(math.ldexp(-0., n), -0.)
            self.assertEqual(math.ldexp(INF, n), INF)
            self.assertEqual(math.ldexp(NINF, n), NINF)
            self.assertTrue(math.isnan(math.ldexp(NAN, n)))
    def testLog(self):
        """log: natural log by default, with an optional base argument.

        NOTE: Python 2 code -- uses the long type in the loop below.
        """
        self.assertRaises(TypeError, math.log)
        self.ftest('log(1/e)', math.log(1/math.e), -1)
        self.ftest('log(1)', math.log(1), 0)
        self.ftest('log(e)', math.log(math.e), 1)
        self.ftest('log(32,2)', math.log(32,2), 5)
        # Huge integer arguments must not overflow during conversion.
        self.ftest('log(10**40, 10)', math.log(10**40, 10), 40)
        self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2)
        self.assertEqual(math.log(INF), INF)
        self.assertRaises(ValueError, math.log, NINF)
        self.assertTrue(math.isnan(math.log(NAN)))
        # Log values should match for int and long (issue #18739).
        for n in range(1, 1000):
            self.assertEqual(math.log(n), math.log(long(n)))
def testLog1p(self):
self.assertRaises(TypeError, math.log1p)
self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
self.ftest('log1p(0)', math.log1p(0), 0)
self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
self.ftest('log1p(1)', math.log1p(1), math.log(2))
self.assertEqual(math.log1p(INF), INF)
self.assertRaises(ValueError, math.log1p, NINF)
self.assertTrue(math.isnan(math.log1p(NAN)))
n= 2**90
self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
def testLog10(self):
self.assertRaises(TypeError, math.log10)
self.ftest('log10(0.1)', math.log10(0.1), -1)
self.ftest('log10(1)', math.log10(1), 0)
self.ftest('log10(10)', math.log10(10), 1)
self.assertEqual(math.log(INF), INF)
self.assertRaises(ValueError, math.log10, NINF)
self.assertTrue(math.isnan(math.log10(NAN)))
# Log values should match for int and long (issue #18739).
for n in range(1, 1000):
self.assertEqual(math.log10(n), math.log10(long(n)))
def testModf(self):
self.assertRaises(TypeError, math.modf)
def testmodf(name, result, expected):
(v1, v2), (e1, e2) = result, expected
if abs(v1-e1) > eps or abs(v2-e2):
self.fail('%s returned %r, expected %r'%\
(name, (v1,v2), (e1,e2)))
testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
self.assertEqual(math.modf(INF), (0.0, INF))
self.assertEqual(math.modf(NINF), (-0.0, NINF))
modf_nan = math.modf(NAN)
self.assertTrue(math.isnan(modf_nan[0]))
self.assertTrue(math.isnan(modf_nan[1]))
    def testPow(self):
        """Exhaustive special-value checks for pow(x, y).

        Covers zeros, infinities, NaN and negative bases per IEEE 754 /
        C99 Annex F pow() semantics.
        """
        self.assertRaises(TypeError, math.pow)
        self.ftest('pow(0,1)', math.pow(0,1), 0)
        self.ftest('pow(1,0)', math.pow(1,0), 1)
        self.ftest('pow(2,1)', math.pow(2,1), 2)
        self.ftest('pow(2,-1)', math.pow(2,-1), 0.5)
        self.assertEqual(math.pow(INF, 1), INF)
        self.assertEqual(math.pow(NINF, 1), NINF)
        # pow(1, anything) is 1, even for NaN exponents.
        self.assertEqual((math.pow(1, INF)), 1.)
        self.assertEqual((math.pow(1, NINF)), 1.)
        self.assertTrue(math.isnan(math.pow(NAN, 1)))
        self.assertTrue(math.isnan(math.pow(2, NAN)))
        self.assertTrue(math.isnan(math.pow(0, NAN)))
        self.assertEqual(math.pow(1, NAN), 1)
        # pow(0., x)
        self.assertEqual(math.pow(0., INF), 0.)
        self.assertEqual(math.pow(0., 3.), 0.)
        self.assertEqual(math.pow(0., 2.3), 0.)
        self.assertEqual(math.pow(0., 2.), 0.)
        self.assertEqual(math.pow(0., 0.), 1.)
        self.assertEqual(math.pow(0., -0.), 1.)
        # Negative exponents of zero divide by zero.
        self.assertRaises(ValueError, math.pow, 0., -2.)
        self.assertRaises(ValueError, math.pow, 0., -2.3)
        self.assertRaises(ValueError, math.pow, 0., -3.)
        self.assertRaises(ValueError, math.pow, 0., NINF)
        self.assertTrue(math.isnan(math.pow(0., NAN)))
        # pow(INF, x)
        self.assertEqual(math.pow(INF, INF), INF)
        self.assertEqual(math.pow(INF, 3.), INF)
        self.assertEqual(math.pow(INF, 2.3), INF)
        self.assertEqual(math.pow(INF, 2.), INF)
        self.assertEqual(math.pow(INF, 0.), 1.)
        self.assertEqual(math.pow(INF, -0.), 1.)
        self.assertEqual(math.pow(INF, -2.), 0.)
        self.assertEqual(math.pow(INF, -2.3), 0.)
        self.assertEqual(math.pow(INF, -3.), 0.)
        self.assertEqual(math.pow(INF, NINF), 0.)
        self.assertTrue(math.isnan(math.pow(INF, NAN)))
        # pow(-0., x) -- odd integer exponents preserve the zero's sign.
        self.assertEqual(math.pow(-0., INF), 0.)
        self.assertEqual(math.pow(-0., 3.), -0.)
        self.assertEqual(math.pow(-0., 2.3), 0.)
        self.assertEqual(math.pow(-0., 2.), 0.)
        self.assertEqual(math.pow(-0., 0.), 1.)
        self.assertEqual(math.pow(-0., -0.), 1.)
        self.assertRaises(ValueError, math.pow, -0., -2.)
        self.assertRaises(ValueError, math.pow, -0., -2.3)
        self.assertRaises(ValueError, math.pow, -0., -3.)
        self.assertRaises(ValueError, math.pow, -0., NINF)
        self.assertTrue(math.isnan(math.pow(-0., NAN)))
        # pow(NINF, x) -- odd integer exponents preserve the sign.
        self.assertEqual(math.pow(NINF, INF), INF)
        self.assertEqual(math.pow(NINF, 3.), NINF)
        self.assertEqual(math.pow(NINF, 2.3), INF)
        self.assertEqual(math.pow(NINF, 2.), INF)
        self.assertEqual(math.pow(NINF, 0.), 1.)
        self.assertEqual(math.pow(NINF, -0.), 1.)
        self.assertEqual(math.pow(NINF, -2.), 0.)
        self.assertEqual(math.pow(NINF, -2.3), 0.)
        self.assertEqual(math.pow(NINF, -3.), -0.)
        self.assertEqual(math.pow(NINF, NINF), 0.)
        self.assertTrue(math.isnan(math.pow(NINF, NAN)))
        # pow(-1, x)
        self.assertEqual(math.pow(-1., INF), 1.)
        self.assertEqual(math.pow(-1., 3.), -1.)
        self.assertRaises(ValueError, math.pow, -1., 2.3)
        self.assertEqual(math.pow(-1., 2.), 1.)
        self.assertEqual(math.pow(-1., 0.), 1.)
        self.assertEqual(math.pow(-1., -0.), 1.)
        self.assertEqual(math.pow(-1., -2.), 1.)
        self.assertRaises(ValueError, math.pow, -1., -2.3)
        self.assertEqual(math.pow(-1., -3.), -1.)
        self.assertEqual(math.pow(-1., NINF), 1.)
        self.assertTrue(math.isnan(math.pow(-1., NAN)))
        # pow(1, x) -- always exactly 1, even for NaN exponents.
        self.assertEqual(math.pow(1., INF), 1.)
        self.assertEqual(math.pow(1., 3.), 1.)
        self.assertEqual(math.pow(1., 2.3), 1.)
        self.assertEqual(math.pow(1., 2.), 1.)
        self.assertEqual(math.pow(1., 0.), 1.)
        self.assertEqual(math.pow(1., -0.), 1.)
        self.assertEqual(math.pow(1., -2.), 1.)
        self.assertEqual(math.pow(1., -2.3), 1.)
        self.assertEqual(math.pow(1., -3.), 1.)
        self.assertEqual(math.pow(1., NINF), 1.)
        self.assertEqual(math.pow(1., NAN), 1.)
        # pow(x, 0) should be 1 for any x
        self.assertEqual(math.pow(2.3, 0.), 1.)
        self.assertEqual(math.pow(-2.3, 0.), 1.)
        self.assertEqual(math.pow(NAN, 0.), 1.)
        self.assertEqual(math.pow(2.3, -0.), 1.)
        self.assertEqual(math.pow(-2.3, -0.), 1.)
        self.assertEqual(math.pow(NAN, -0.), 1.)
        # pow(x, y) is invalid if x is negative and y is not integral
        self.assertRaises(ValueError, math.pow, -1., 2.3)
        self.assertRaises(ValueError, math.pow, -15., -3.1)
        # pow(x, NINF) -- depends on whether |x| is above or below 1.
        self.assertEqual(math.pow(1.9, NINF), 0.)
        self.assertEqual(math.pow(1.1, NINF), 0.)
        self.assertEqual(math.pow(0.9, NINF), INF)
        self.assertEqual(math.pow(0.1, NINF), INF)
        self.assertEqual(math.pow(-0.1, NINF), INF)
        self.assertEqual(math.pow(-0.9, NINF), INF)
        self.assertEqual(math.pow(-1.1, NINF), 0.)
        self.assertEqual(math.pow(-1.9, NINF), 0.)
        # pow(x, INF) -- mirror image of the NINF cases above.
        self.assertEqual(math.pow(1.9, INF), INF)
        self.assertEqual(math.pow(1.1, INF), INF)
        self.assertEqual(math.pow(0.9, INF), 0.)
        self.assertEqual(math.pow(0.1, INF), 0.)
        self.assertEqual(math.pow(-0.1, INF), 0.)
        self.assertEqual(math.pow(-0.9, INF), 0.)
        self.assertEqual(math.pow(-1.1, INF), INF)
        self.assertEqual(math.pow(-1.9, INF), INF)
        # pow(x, y) should work for x negative, y an integer
        self.ftest('(-2.)**3.', math.pow(-2.0, 3.0), -8.0)
        self.ftest('(-2.)**2.', math.pow(-2.0, 2.0), 4.0)
        self.ftest('(-2.)**1.', math.pow(-2.0, 1.0), -2.0)
        self.ftest('(-2.)**0.', math.pow(-2.0, 0.0), 1.0)
        self.ftest('(-2.)**-0.', math.pow(-2.0, -0.0), 1.0)
        self.ftest('(-2.)**-1.', math.pow(-2.0, -1.0), -0.5)
        self.ftest('(-2.)**-2.', math.pow(-2.0, -2.0), 0.25)
        self.ftest('(-2.)**-3.', math.pow(-2.0, -3.0), -0.125)
        self.assertRaises(ValueError, math.pow, -2.0, -0.5)
        self.assertRaises(ValueError, math.pow, -2.0, 0.5)
        # the following tests have been commented out since they don't
        # really belong here:  the implementation of ** for floats is
        # independent of the implementation of math.pow
        #self.assertEqual(1**NAN, 1)
        #self.assertEqual(1**INF, 1)
        #self.assertEqual(1**NINF, 1)
        #self.assertEqual(1**0, 1)
        #self.assertEqual(1.**NAN, 1)
        #self.assertEqual(1.**INF, 1)
        #self.assertEqual(1.**NINF, 1)
        #self.assertEqual(1.**0, 1)
def testRadians(self):
self.assertRaises(TypeError, math.radians)
self.ftest('radians(180)', math.radians(180), math.pi)
self.ftest('radians(90)', math.radians(90), math.pi/2)
self.ftest('radians(-45)', math.radians(-45), -math.pi/4)
def testSin(self):
self.assertRaises(TypeError, math.sin)
self.ftest('sin(0)', math.sin(0), 0)
self.ftest('sin(pi/2)', math.sin(math.pi/2), 1)
self.ftest('sin(-pi/2)', math.sin(-math.pi/2), -1)
try:
self.assertTrue(math.isnan(math.sin(INF)))
self.assertTrue(math.isnan(math.sin(NINF)))
except ValueError:
self.assertRaises(ValueError, math.sin, INF)
self.assertRaises(ValueError, math.sin, NINF)
self.assertTrue(math.isnan(math.sin(NAN)))
def testSinh(self):
self.assertRaises(TypeError, math.sinh)
self.ftest('sinh(0)', math.sinh(0), 0)
self.ftest('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
self.ftest('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
self.assertEqual(math.sinh(INF), INF)
self.assertEqual(math.sinh(NINF), NINF)
self.assertTrue(math.isnan(math.sinh(NAN)))
def testSqrt(self):
self.assertRaises(TypeError, math.sqrt)
self.ftest('sqrt(0)', math.sqrt(0), 0)
self.ftest('sqrt(1)', math.sqrt(1), 1)
self.ftest('sqrt(4)', math.sqrt(4), 2)
self.assertEqual(math.sqrt(INF), INF)
self.assertRaises(ValueError, math.sqrt, NINF)
self.assertTrue(math.isnan(math.sqrt(NAN)))
def testTan(self):
self.assertRaises(TypeError, math.tan)
self.ftest('tan(0)', math.tan(0), 0)
self.ftest('tan(pi/4)', math.tan(math.pi/4), 1)
self.ftest('tan(-pi/4)', math.tan(-math.pi/4), -1)
try:
self.assertTrue(math.isnan(math.tan(INF)))
self.assertTrue(math.isnan(math.tan(NINF)))
except:
self.assertRaises(ValueError, math.tan, INF)
self.assertRaises(ValueError, math.tan, NINF)
self.assertTrue(math.isnan(math.tan(NAN)))
def testTanh(self):
self.assertRaises(TypeError, math.tanh)
self.ftest('tanh(0)', math.tanh(0), 0)
self.ftest('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
self.ftest('tanh(inf)', math.tanh(INF), 1)
self.ftest('tanh(-inf)', math.tanh(NINF), -1)
self.assertTrue(math.isnan(math.tanh(NAN)))
# check that tanh(-0.) == -0. on IEEE 754 systems
if float.__getformat__("double").startswith("IEEE"):
self.assertEqual(math.tanh(-0.), -0.)
self.assertEqual(math.copysign(1., math.tanh(-0.)),
math.copysign(1., -0.))
def test_trunc(self):
self.assertEqual(math.trunc(1), 1)
self.assertEqual(math.trunc(-1), -1)
self.assertEqual(type(math.trunc(1)), int)
self.assertEqual(type(math.trunc(1.5)), int)
self.assertEqual(math.trunc(1.5), 1)
self.assertEqual(math.trunc(-1.5), -1)
self.assertEqual(math.trunc(1.999999), 1)
self.assertEqual(math.trunc(-1.999999), -1)
self.assertEqual(math.trunc(-0.999999), -0)
self.assertEqual(math.trunc(-100.999), -100)
class TestTrunc(object):
def __trunc__(self):
return 23
class TestNoTrunc(object):
pass
self.assertEqual(math.trunc(TestTrunc()), 23)
self.assertRaises(TypeError, math.trunc)
self.assertRaises(TypeError, math.trunc, 1, 2)
self.assertRaises((AttributeError, TypeError), math.trunc,
TestNoTrunc())
def testIsnan(self):
self.assertTrue(math.isnan(float("nan")))
self.assertTrue(math.isnan(float("inf")* 0.))
self.assertFalse(math.isnan(float("inf")))
self.assertFalse(math.isnan(0.))
self.assertFalse(math.isnan(1.))
def testIsinf(self):
self.assertTrue(math.isinf(float("inf")))
self.assertTrue(math.isinf(float("-inf")))
self.assertTrue(math.isinf(1E400))
self.assertTrue(math.isinf(-1E400))
self.assertFalse(math.isinf(float("nan")))
self.assertFalse(math.isinf(0.))
self.assertFalse(math.isinf(1.))
# RED_FLAG 16-Oct-2000 Tim
# While 2.0 is more consistent about exceptions than previous releases, it
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
@unittest.skipUnless(verbose, 'requires verbose mode')
def test_exceptions(self):
try:
x = math.exp(-1000000000)
except:
# mathmodule.c is failing to weed out underflows from libm, or
# we've got an fp format with huge dynamic range
self.fail("underflowing exp() should not have raised "
"an exception")
if x != 0:
self.fail("underflowing exp() should have returned 0")
# If this fails, probably using a strict IEEE-754 conforming libm, and x
# is +Inf afterwards. But Python wants overflows detected by default.
try:
x = math.exp(1000000000)
except OverflowError:
pass
else:
self.fail("overflowing exp() didn't trigger OverflowError")
# If this fails, it could be a puzzle. One odd possibility is that
# mathmodule.c's macros are getting confused while comparing
# Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
# as a result (and so raising OverflowError instead).
try:
x = math.sqrt(-1.0)
except ValueError:
pass
else:
self.fail("sqrt(-1) didn't raise ValueError")
@requires_IEEE_754
def test_testfile(self):
    """Run the real-valued cases from the cmath test-value file against
    the math module.

    Each record unpacks as (id, fn, arg.real, arg.imag, expected.real,
    expected.imag, flags); cases with a complex input or result, or any
    flags, are skipped here (they belong to test_cmath)."""
    for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
        # Skip if either the input or result is complex, or if
        # flags is nonempty
        if ai != 0. or ei != 0. or flags:
            continue
        if fn in ['rect', 'polar']:
            # no real versions of rect, polar
            continue
        func = getattr(math, fn)
        try:
            result = func(ar)
        except ValueError:
            message = ("Unexpected ValueError in " +
                       "test %s:%s(%r)\n" % (id, fn, ar))
            self.fail(message)
        except OverflowError:
            message = ("Unexpected OverflowError in " +
                       "test %s:%s(%r)\n" % (id, fn, ar))
            self.fail(message)
        # ftest compares the result against the expected real part with
        # the suite's default tolerance.
        self.ftest("%s:%s(%r)" % (id, fn, ar), result, er)
@unittest.skipUnless(float.__getformat__("double").startswith("IEEE"),
                     "test requires IEEE 754 doubles")
def test_mtestfile(self):
    """Run the math-module cases from ``math_testcases``.

    Flagged error conditions must raise (ValueError for invalid /
    divide-by-zero, OverflowError for overflow); numeric results are
    compared in ulps, with looser bounds for lgamma and erfc.  All
    failures are accumulated and reported together at the end.
    """
    ALLOWED_ERROR = 20 # permitted error, in ulps
    fail_fmt = "{}:{}({!r}): expected {!r}, got {!r}"

    failures = []
    for id, fn, arg, expected, flags in parse_mtestfile(math_testcases):
        func = getattr(math, fn)

        if 'invalid' in flags or 'divide-by-zero' in flags:
            expected = 'ValueError'
        elif 'overflow' in flags:
            expected = 'OverflowError'

        try:
            got = func(arg)
        except ValueError:
            got = 'ValueError'
        except OverflowError:
            got = 'OverflowError'

        accuracy_failure = None
        if isinstance(got, float) and isinstance(expected, float):
            if math.isnan(expected) and math.isnan(got):
                continue
            if not math.isnan(expected) and not math.isnan(got):
                if fn == 'lgamma':
                    # we use a weaker accuracy test for lgamma;
                    # lgamma only achieves an absolute error of
                    # a few multiples of the machine accuracy, in
                    # general.
                    accuracy_failure = acc_check(expected, got,
                                                 rel_err = 5e-15,
                                                 abs_err = 5e-15)
                elif fn == 'erfc':
                    # erfc has less-than-ideal accuracy for large
                    # arguments (x ~ 25 or so), mainly due to the
                    # error involved in computing exp(-x*x).
                    #
                    # XXX Would be better to weaken this test only
                    # for large x, instead of for all x.
                    accuracy_failure = ulps_check(expected, got, 2000)
                else:
                    # FIX: use the named constant declared above instead
                    # of duplicating it as a magic number (it was
                    # previously defined but never used).
                    accuracy_failure = ulps_check(expected, got,
                                                  ALLOWED_ERROR)
                if accuracy_failure is None:
                    continue

        if isinstance(got, str) and isinstance(expected, str):
            if got == expected:
                continue

        fail_msg = fail_fmt.format(id, fn, arg, expected, got)
        if accuracy_failure is not None:
            fail_msg += ' ({})'.format(accuracy_failure)
        failures.append(fail_msg)

    if failures:
        self.fail('Failures in test_mtestfile:\n ' +
                  '\n '.join(failures))
def test_main():
    """Collect the unittest cases plus the ieee754 doctest file and run them."""
    from doctest import DocFileSuite
    all_tests = unittest.TestSuite()
    all_tests.addTests([unittest.makeSuite(MathTests),
                        DocFileSuite("ieee754.txt")])
    run_unittest(all_tests)

if __name__ == '__main__':
    test_main()
| mit |
dinhhung09138/store-Adminstration | Web/Content/lte/bower_components/jvectormap/converter/converter.py | 129 | 10451 | #
# jVectorMap version 2.0.4
#
# Copyright 2011-2013, Kirill Lebedev
#
import sys
import shapely.geometry
import shapely.wkb
import shapely.affinity
from osgeo import ogr
from osgeo import osr
import json
import codecs
import copy
class Map:
    """Accumulates named SVG paths plus rendering metadata for one map.

    The Converter fills in ``width``, ``height``, ``insets`` and
    ``projection`` after construction; ``getJSCode`` then serialises the
    whole thing as a jVectorMap ``addMap`` JavaScript call.
    """

    def __init__(self, name, language):
        self.name = name
        self.language = language
        self.paths = {}
        self.width = 0
        self.height = 0
        self.bbox = []

    def addPath(self, path, code, name):
        # Register one region outline, keyed by its region code.
        self.paths[code] = {"path": path, "name": name}

    def getJSCode(self):
        # Insertion order of this dict is deliberately kept identical to
        # the historic output so the generated JS is byte-stable.
        data = {
            "paths": self.paths,
            "width": self.width,
            "height": self.height,
            "insets": self.insets,
            "projection": self.projection
        }
        map_id = "%s_%s_%s" % (self.name, self.projection['type'], self.language)
        return "jQuery.fn.vectorMap('addMap', '%s',%s);" % (map_id, json.dumps(data))
class Converter:
def __init__(self, config):
    """Build a Converter from a configuration dictionary.

    ``config`` overrides the defaults below and must describe at least one
    shapefile input: either an explicit ``sources`` list, or the
    single-source keys ``input_file``/``where``/``name_field``/
    ``code_field``/``input_file_encoding``.
    """
    args = {
        'buffer_distance': -0.4,
        'simplify_tolerance': 0.2,
        'longitude0': 0,
        'projection': 'mill',
        'name': 'world',
        'width': 900,
        'language': 'en',
        'precision': 2,
        'insets': []
    }
    args.update(config)

    self.map = Map(args['name'], args.get('language'))

    # Normalise to a list of source descriptions: either the explicit
    # 'sources' list, or a single source assembled from top-level keys.
    if args.get('sources'):
        self.sources = args['sources']
    else:
        self.sources = [{
            'input_file': args.get('input_file'),
            'where': args.get('where'),
            'name_field': args.get('name_field'),
            'code_field': args.get('code_field'),
            'input_file_encoding': args.get('input_file_encoding')
        }]

    # Fill in per-source defaults for any keys left unset (None).
    default_source = {
        'where': '',
        'name_field': 0,
        'code_field': 1,
        'input_file_encoding': 'iso-8859-1'
    }
    for index in range(len(self.sources)):
        for key in default_source:
            if self.sources[index].get(key) is None:
                self.sources[index][key] = default_source[key]

    self.features = {}
    self.width = args.get('width')
    self.minimal_area = args.get('minimal_area')
    self.longitude0 = float(args.get('longitude0'))
    self.projection = args.get('projection')
    self.precision = args.get('precision')
    self.buffer_distance = args.get('buffer_distance')
    self.simplify_tolerance = args.get('simplify_tolerance')
    self.for_each = args.get('for_each')
    self.emulate_longitude0 = args.get('emulate_longitude0')
    # For merc/mill with a non-zero central meridian, default to emulating
    # the shift in loadDataSource (cut at the anti-meridian and translate)
    # instead of passing +lon_0 to proj4 below.
    if args.get('emulate_longitude0') is None and (self.projection == 'merc' or self.projection == 'mill') and self.longitude0 != 0:
        self.emulate_longitude0 = True

    if args.get('viewport'):
        # NOTE(review): written for Python 2, where map() returns a list;
        # under Python 3 this would be a one-shot iterator -- confirm
        # before porting.
        self.viewport = map(lambda s: float(s), args.get('viewport').split(' '))
    else:
        self.viewport = False

    # spatial reference to convert to
    self.spatialRef = osr.SpatialReference()
    projString = '+proj='+str(self.projection)+' +a=6381372 +b=6381372 +lat_0=0'
    if not self.emulate_longitude0:
        projString += ' +lon_0='+str(self.longitude0)
    self.spatialRef.ImportFromProj4(projString)

    # handle map insets
    if args.get('insets'):
        self.insets = args.get('insets')
    else:
        self.insets = []
def loadData(self):
    """Load every configured data source into self.features."""
    for source_config in self.sources:
        self.loadDataSource(source_config)
def loadDataSource(self, sourceConfig):
    """Load one OGR data source and add its (multi)polygon features to
    self.features.

    Features are reprojected to self.spatialRef, optionally clipped to the
    configured viewport and filtered by minimal area; duplicate region
    codes are renamed to '_<n>' so nothing is silently overwritten.

    Raises Exception for any geometry that is not wkbPolygon or
    wkbMultiPolygon.
    """
    source = ogr.Open( sourceConfig['input_file'] )
    layer = source.GetLayer(0)
    layer.SetAttributeFilter( sourceConfig['where'].encode('ascii') )

    self.viewportRect = False
    transformation = osr.CoordinateTransformation( layer.GetSpatialRef(), self.spatialRef )
    if self.viewport:
        layer.SetSpatialFilterRect( *self.viewport )
        point1 = transformation.TransformPoint(self.viewport[0], self.viewport[1])
        point2 = transformation.TransformPoint(self.viewport[2], self.viewport[3])
        self.viewportRect = shapely.geometry.box(point1[0], point1[1], point2[0], point2[1])

    layer.ResetReading()
    codes = {}

    if self.emulate_longitude0:
        # Boxes covering the two halves of the world on either side of the
        # emulated central meridian; each half is shifted below so the
        # requested meridian ends up in the middle of the map.
        meridian = -180 + self.longitude0
        p1 = transformation.TransformPoint(-180, 89)
        p2 = transformation.TransformPoint(meridian, -89)
        left = shapely.geometry.box(p1[0], p1[1], p2[0], p2[1])
        p3 = transformation.TransformPoint(meridian, 89)
        p4 = transformation.TransformPoint(180, -89)
        right = shapely.geometry.box(p3[0], p3[1], p4[0], p4[1])

    # load features
    nextCode = 0
    for feature in layer:
        geometry = feature.GetGeometryRef()
        geometryType = geometry.GetGeometryType()

        if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
            geometry.TransformTo( self.spatialRef )
            shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
            if not shapelyGeometry.is_valid:
                # buffer(0) is the standard trick to repair
                # self-intersecting polygons
                shapelyGeometry = shapelyGeometry.buffer(0, 1)

            if self.emulate_longitude0:
                leftPart = shapely.affinity.translate(shapelyGeometry.intersection(left), p4[0] - p3[0])
                rightPart = shapely.affinity.translate(shapelyGeometry.intersection(right), p1[0] - p2[0])
                shapelyGeometry = leftPart.buffer(0.1, 1).union(rightPart.buffer(0.1, 1)).buffer(-0.1, 1)

            if not shapelyGeometry.is_valid:
                shapelyGeometry = shapelyGeometry.buffer(0, 1)
            shapelyGeometry = self.applyFilters(shapelyGeometry)

            if shapelyGeometry:
                name = feature.GetFieldAsString(str(sourceConfig.get('name_field'))).decode(sourceConfig.get('input_file_encoding'))
                code = feature.GetFieldAsString(str(sourceConfig.get('code_field'))).decode(sourceConfig.get('input_file_encoding'))
                if code in codes:
                    code = '_' + str(nextCode)
                    nextCode += 1
                codes[code] = name
                self.features[code] = {"geometry": shapelyGeometry, "name": name, "code": code}
        else:
            # BUG FIX: geometryType is an int, so the old
            # 'raise Exception, "..."+geometryType' raised a TypeError
            # that masked the real error; also use the Exception(...)
            # call form, which is valid on both Python 2 and 3.
            raise Exception("Wrong geometry type: %s" % geometryType)
def convert(self, outputFile):
    """Render all loaded features (configured insets first, then the main
    map) and write the jVectorMap JavaScript definition to ``outputFile``.

    If ``for_each`` is configured, a child conversion is run recursively
    for every region code, with '{{code}}' substituted into the child
    configuration.
    """
    print('Generating ' + outputFile)  # single-arg print: same on Py2 and Py3

    self.loadData()

    # FIX: materialise the key list explicitly; identical on Python 2
    # (keys() was already a list) and correct on Python 3, where the
    # view could not be copied and .remove()d below.
    codes = list(self.features.keys())
    main_codes = copy.copy(codes)
    self.map.insets = []
    envelope = []
    # Render the configured insets first; their codes are removed from
    # the main map.
    for inset in self.insets:
        insetBbox = self.renderMapInset(inset['codes'], inset['left'], inset['top'], inset['width'])
        insetHeight = (insetBbox[3] - insetBbox[1]) * (inset['width'] / (insetBbox[2] - insetBbox[0]))
        self.map.insets.append({
            "bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
            "left": inset['left'],
            "top": inset['top'],
            "width": inset['width'],
            "height": insetHeight
        })
        envelope.append(
            shapely.geometry.box(
                inset['left'], inset['top'], inset['left'] + inset['width'], inset['top'] + insetHeight
            )
        )
        for code in inset['codes']:
            main_codes.remove(code)

    # Render the main map with whatever codes remain.
    insetBbox = self.renderMapInset(main_codes, 0, 0, self.width)
    insetHeight = (insetBbox[3] - insetBbox[1]) * (self.width / (insetBbox[2] - insetBbox[0]))
    envelope.append( shapely.geometry.box( 0, 0, self.width, insetHeight ) )
    mapBbox = shapely.geometry.MultiPolygon( envelope ).bounds

    self.map.width = mapBbox[2] - mapBbox[0]
    self.map.height = mapBbox[3] - mapBbox[1]
    self.map.insets.append({
        "bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
        "left": 0,
        "top": 0,
        "width": self.width,
        "height": insetHeight
    })
    self.map.projection = {"type": self.projection, "centralMeridian": float(self.longitude0)}

    # BUG FIX: close the output file deterministically instead of relying
    # on garbage collection of the anonymous file object.
    with open(outputFile, 'w') as jsFile:
        jsFile.write( self.map.getJSCode() )

    if self.for_each is not None:
        for code in codes:
            childConfig = copy.deepcopy(self.for_each)
            for param in ('input_file', 'output_file', 'where', 'name'):
                childConfig[param] = childConfig[param].replace('{{code}}', code.lower())
            converter = Converter(childConfig)
            converter.convert(childConfig['output_file'])
def renderMapInset(self, codes, left, top, width):
    """Convert the features named by ``codes`` into SVG path strings,
    scaled and offset so the group fits a box of the given ``width``
    anchored at (left, top).

    Paths are registered on self.map; the combined bounds of the group
    (in projected coordinates) are returned.
    """
    envelope = []
    for code in codes:
        envelope.append( self.features[code]['geometry'].envelope )

    bbox = shapely.geometry.MultiPolygon( envelope ).bounds
    # projected units per output pixel
    scale = (bbox[2]-bbox[0]) / width

    # generate SVG paths
    for code in codes:
        feature = self.features[code]
        geometry = feature['geometry']
        if self.buffer_distance:
            geometry = geometry.buffer(self.buffer_distance*scale, 1)
        if geometry.is_empty:
            continue
        if self.simplify_tolerance:
            geometry = geometry.simplify(self.simplify_tolerance*scale, preserve_topology=True)
        if isinstance(geometry, shapely.geometry.multipolygon.MultiPolygon):
            polygons = geometry.geoms
        else:
            polygons = [geometry]
        path = ''
        for polygon in polygons:
            rings = []
            rings.append(polygon.exterior)
            rings.extend(polygon.interiors)
            for ring in rings:
                for pointIndex in range( len(ring.coords) ):
                    point = ring.coords[pointIndex]
                    if pointIndex == 0:
                        # absolute move-to for the first point of the ring;
                        # the y axis is flipped because SVG grows downwards
                        path += 'M'+str( round( (point[0]-bbox[0]) / scale + left, self.precision) )
                        path += ','+str( round( (bbox[3] - point[1]) / scale + top, self.precision) )
                    else:
                        # relative line-to from the previous point
                        path += 'l' + str( round(point[0]/scale - ring.coords[pointIndex-1][0]/scale, self.precision) )
                        path += ',' + str( round(ring.coords[pointIndex-1][1]/scale - point[1]/scale, self.precision) )
                # close the ring
                path += 'Z'
        self.map.addPath(path, feature['code'], feature['name'])
    return bbox
def applyFilters(self, geometry):
    """Run the configured geometry filters in order (viewport clipping,
    then minimal-area pruning); return False as soon as one of them
    rejects the geometry entirely."""
    for enabled, filterFunc in ((self.viewportRect, self.filterByViewport),
                                (self.minimal_area, self.filterByMinimalArea)):
        if enabled:
            geometry = filterFunc(geometry)
            if not geometry:
                return False
    return geometry
def filterByViewport(self, geometry):
    """Clip the geometry to the viewport rectangle; return False if the
    intersection cannot be computed (degenerate topology)."""
    try:
        clipped = geometry.intersection(self.viewportRect)
    except shapely.geos.TopologicalError:
        return False
    return clipped
def filterByMinimalArea(self, geometry):
    """Drop every polygon whose area does not exceed self.minimal_area and
    return the survivors as a MultiPolygon."""
    if isinstance(geometry, shapely.geometry.multipolygon.MultiPolygon):
        polygons = geometry.geoms
    else:
        polygons = [geometry]
    # FIX: list comprehension instead of filter(lambda...): identical on
    # Python 2 and, unlike a lazy filter object, also a valid argument to
    # MultiPolygon on Python 3.
    kept = [polygon for polygon in polygons if polygon.area > self.minimal_area]
    return shapely.geometry.multipolygon.MultiPolygon(kept)
# Script entry point: read the JSON configuration either from the file
# given as the first command-line argument or from stdin, then run the
# conversion.  (The unused module-level 'args' dict was removed.)
if len(sys.argv) > 1:
    # BUG FIX: close the config file instead of leaking the handle.
    with open(sys.argv[1], 'r') as paramsFile:
        paramsJson = paramsFile.read()
else:
    paramsJson = sys.stdin.read()
paramsJson = json.loads(paramsJson)

converter = Converter(paramsJson)
converter.convert(paramsJson['output_file'])
| mit |
zwizwa/qemu | scripts/analyse-9p-simpletrace.py | 333 | 9058 | #!/usr/bin/env python
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
from __future__ import print_function

import os

import simpletrace
# Mapping from 9P2000.L protocol message numbers to their symbolic names
# (T* = transmit/request from the client, R* = reply from the server).
symbol_9p = {
    6 : 'TLERROR',
    7 : 'RLERROR',
    8 : 'TSTATFS',
    9 : 'RSTATFS',
    12 : 'TLOPEN',
    13 : 'RLOPEN',
    14 : 'TLCREATE',
    15 : 'RLCREATE',
    16 : 'TSYMLINK',
    17 : 'RSYMLINK',
    18 : 'TMKNOD',
    19 : 'RMKNOD',
    20 : 'TRENAME',
    21 : 'RRENAME',
    22 : 'TREADLINK',
    23 : 'RREADLINK',
    24 : 'TGETATTR',
    25 : 'RGETATTR',
    26 : 'TSETATTR',
    27 : 'RSETATTR',
    30 : 'TXATTRWALK',
    31 : 'RXATTRWALK',
    32 : 'TXATTRCREATE',
    33 : 'RXATTRCREATE',
    40 : 'TREADDIR',
    41 : 'RREADDIR',
    50 : 'TFSYNC',
    51 : 'RFSYNC',
    52 : 'TLOCK',
    53 : 'RLOCK',
    54 : 'TGETLOCK',
    55 : 'RGETLOCK',
    70 : 'TLINK',
    71 : 'RLINK',
    72 : 'TMKDIR',
    73 : 'RMKDIR',
    74 : 'TRENAMEAT',
    75 : 'RRENAMEAT',
    76 : 'TUNLINKAT',
    77 : 'RUNLINKAT',
    100 : 'TVERSION',
    101 : 'RVERSION',
    102 : 'TAUTH',
    103 : 'RAUTH',
    104 : 'TATTACH',
    105 : 'RATTACH',
    106 : 'TERROR',
    107 : 'RERROR',
    108 : 'TFLUSH',
    109 : 'RFLUSH',
    110 : 'TWALK',
    111 : 'RWALK',
    112 : 'TOPEN',
    113 : 'ROPEN',
    114 : 'TCREATE',
    115 : 'RCREATE',
    116 : 'TREAD',
    117 : 'RREAD',
    118 : 'TWRITE',
    119 : 'RWRITE',
    120 : 'TCLUNK',
    121 : 'RCLUNK',
    122 : 'TREMOVE',
    123 : 'RREMOVE',
    124 : 'TSTAT',
    125 : 'RSTAT',
    126 : 'TWSTAT',
    127 : 'RWSTAT'
}
class VirtFSRequestTracker(simpletrace.Analyzer):
    """Pretty-prints VirtFS (9p) request/reply trace records.

    Each ``v9fs_*`` method is dispatched by simpletrace on the trace event
    of the same name; the parameter names mirror the event arguments,
    which is why some of them intentionally shadow builtins such as ``id``
    and ``type``.

    MODERNISATION: all Python-2-only print statements were converted to
    print() calls; together with the ``from __future__ import
    print_function`` import the output is byte-identical on Python 2 and
    the module now also runs on Python 3.
    """

    def begin(self):
        print("Pretty printing 9p simpletrace log ...")

    def v9fs_rerror(self, tag, id, err):
        print("RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")")

    def v9fs_version(self, tag, id, msize, version):
        print("TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")")

    def v9fs_version_return(self, tag, id, msize, version):
        print("RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")")

    def v9fs_attach(self, tag, id, fid, afid, uname, aname):
        print("TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")")

    def v9fs_attach_return(self, tag, id, type, version, path):
        print("RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})")

    def v9fs_stat(self, tag, id, fid):
        print("TSTAT (tag =", tag, ", fid =", fid, ")")

    def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
        print("RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")")

    def v9fs_getattr(self, tag, id, fid, request_mask):
        print("TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")")

    def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
        print("RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")")

    def v9fs_walk(self, tag, id, fid, newfid, nwnames):
        print("TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")")

    def v9fs_walk_return(self, tag, id, nwnames, qids):
        print("RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")")

    def v9fs_open(self, tag, id, fid, mode):
        print("TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")")

    def v9fs_open_return(self, tag, id, type, version, path, iounit):
        print("ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")

    def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
        print("TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")")

    def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
        print("RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")

    def v9fs_fsync(self, tag, id, fid, datasync):
        print("TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")")

    def v9fs_clunk(self, tag, id, fid):
        print("TCLUNK (tag =", tag, ", fid =", fid, ")")

    def v9fs_read(self, tag, id, fid, off, max_count):
        print("TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")")

    def v9fs_read_return(self, tag, id, count, err):
        print("RREAD (tag =", tag, ", count =", count, ", err =", err, ")")

    def v9fs_readdir(self, tag, id, fid, offset, max_count):
        print("TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")")

    def v9fs_readdir_return(self, tag, id, count, retval):
        print("RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")")

    def v9fs_write(self, tag, id, fid, off, count, cnt):
        print("TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")")

    def v9fs_write_return(self, tag, id, total, err):
        print("RWRITE (tag =", tag, ", total =", total, ", err =", err, ")")

    def v9fs_create(self, tag, id, fid, name, perm, mode):
        print("TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")")

    def v9fs_create_return(self, tag, id, type, version, path, iounit):
        print("RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")

    def v9fs_symlink(self, tag, id, fid, name, symname, gid):
        print("TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")")

    def v9fs_symlink_return(self, tag, id, type, version, path):
        print("RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})")

    def v9fs_flush(self, tag, id, flush_tag):
        print("TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")")

    def v9fs_link(self, tag, id, dfid, oldfid, name):
        print("TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")")

    def v9fs_remove(self, tag, id, fid):
        print("TREMOVE (tag =", tag, ", fid =", fid, ")")

    def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
        print("TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")")

    def v9fs_mknod(self, tag, id, fid, mode, major, minor):
        print("TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")")

    def v9fs_lock(self, tag, id, fid, type, start, length):
        print("TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")")

    def v9fs_lock_return(self, tag, id, status):
        print("RLOCK (tag =", tag, ", status =", status, ")")

    def v9fs_getlock(self, tag, id, fid, type, start, length):
        print("TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")")

    def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
        print("RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")")

    def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
        print("TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")")

    def v9fs_mkdir_return(self, tag, id, type, version, path, err):
        print("RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")")

    def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
        print("TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")")

    def v9fs_xattrwalk_return(self, tag, id, size):
        print("RXATTRWALK (tag =", tag, ", xattrsize =", size, ")")

    def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
        print("TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")")

    def v9fs_readlink(self, tag, id, fid):
        print("TREADLINK (tag =", tag, ", fid =", fid, ")")

    def v9fs_readlink_return(self, tag, id, target):
        print("RREADLINK (tag =", tag, ", target =", target, ")")
# Feed every event from the trace file named on the command line through
# the pretty-printer above.
simpletrace.run(VirtFSRequestTracker())
| gpl-2.0 |
Pikecillo/genna | genna/uml/element.py | 1 | 14453 | ##======================================================================
##
## Copyright (C) 2007. Mario Rincon Nigro.
## Universidad de Los Andes.
##
## This file is part of Genna.
##
## Genna is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## Genna is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Genna. If not, see <http://www.gnu.org/licenses/>.
##
##======================================================================
# Implementacion de subconjunto del metamodelo del UML 1.4.
# Las clases de elementos son diccionarios. Los atributos y asociaciones
# de estas clases se agregan a traves del metodo set.
from exception import BadElementCompositionError
# Base class ModelElement.
class ModelElement(dict):
    """Base class for all UML model elements.

    Elements are dictionaries: each attribute/association is stored under
    its name as a tuple ``(value, allowed_classes)``, where ``value`` is
    the current content and ``allowed_classes`` lists the classes its
    items may be instances of.

    Note: ``get`` deliberately overrides ``dict.get`` with different
    semantics (attribute access instead of key/default lookup).
    """

    # The element identifier is mandatory for every instance
    def __init__(self, xmi_id, name=None):
        # Every element attribute is a tuple: the first item holds the
        # value object, the second item is the list of classes whose
        # instances the stored object may be
        self['xmi.id'] = (xmi_id, [u''.__class__])
        self['name'] = (name, [u''.__class__])
        self['stereotype'] = ([], [u''.__class__]) # xmi.idref to Stereotype
        self['taggedValue'] = ([], [TaggedValue])

    # Sets attribute 'attribute' to 'value'
    def set(self, attribute, value):
        valueClass = value.__class__

        # Trying to give an attribute an invalid value
        # raises an exception
        if valueClass not in self[attribute][1]:
            raise BadElementCompositionError(attribute, self.__class__,
                                             self[attribute][1])

        # If it is an ordered attribute or a list of xmi.idref
        if self[attribute][0].__class__ == [].__class__:
            self[attribute][0].append(value)
        # If it is an unordered attribute
        elif self[attribute][0].__class__ == {}.__class__:
            # If it is the first element of its class in the attribute
            if valueClass not in self[attribute][0].keys():
                self[attribute][0][valueClass] = {}
            self[attribute][0][valueClass][value['xmi.id'][0]] = value
        # If it is another element or an xmi.idref
        else:
            self[attribute] = (value, self[attribute][0])

    # Returns the value of an attribute of the element
    def get(self, attribute, className=None):
        # For an unordered attribute, when a class name is given,
        # return the dictionary of elements of that class
        if self[attribute][0].__class__ == {}.__class__ and className:
            if className in self[attribute][0].keys():
                return self[attribute][0][className]
            else:
                return {}

        return self[attribute][0]

    # If the attribute value is a string that designates something,
    # report that it is of class string.
    # If it can hold an instance, return None
    def getAttributeCharacter(self, attribute):
        if u''.__class__ in self[attribute][1]:
            return u''.__class__

        return None
# Abstract element Namespace
class Namespace(ModelElement):
    """Abstract UML Namespace: an element that owns other elements."""

    # Element classes that are valid for the 'ownedElement' attribute
    validOwnedElements = []

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        self['ownedElement'] = ({}, self.validOwnedElements)

class Package(Namespace):
    """UML Package: groups classes, interfaces and relationships."""

    def __init__(self, xmi_id, name):
        self.validOwnedElements = [Model, Package, Class, Interface,
                                   Generalization, Association, Dependency,
                                   Abstraction]
        Namespace.__init__(self, xmi_id, name)

class Model(Namespace):
    """UML Model: the top-level namespace of a model."""

    def __init__(self, xmi_id, name):
        self.validOwnedElements = [DataType, Stereotype, TagDefinition,
                                   Class, Interface, Generalization,
                                   Association, Dependency, Abstraction,
                                   Model, Package, CallEvent,
                                   TimeEvent]
        Namespace.__init__(self, xmi_id, name)
class Stereotype(ModelElement):
    """UML Stereotype: extends the semantics of a base metaclass."""

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        self['baseClass'] = (None, [u''.__class__])

class TagDefinition(ModelElement):
    """UML TagDefinition: declares a tag usable in tagged values."""

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        self['tagType'] = (None, [u''.__class__])
        self['multiplicity'] = (None, [Multiplicity])

class TaggedValue(ModelElement):
    """UML TaggedValue: a (tag, value) annotation on a model element."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['dataValue'] = (None, [u''.__class__])
        self['type'] = (None, [u''.__class__]) # xmi.idref to TagDefinition

class DataType(ModelElement):
    """UML DataType: a primitive/value type with no structure of its own."""

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
# Abstract element Classifier
class Classifier(ModelElement):
    """Abstract UML Classifier: a named element that owns features."""

    # Element classes that are valid for the 'feature' attribute
    validFeatures = []

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        self['visibility'] = (None, [u''.__class__])
        self['isLeaf'] = (None, [u''.__class__])
        self['isAbstract'] = (None, [u''.__class__])
        self['feature'] = ({}, self.validFeatures)
        self['clientDependency'] = ([], [u''.__class__])
        self['generalization'] = ([], [u''.__class__])

class Class(Classifier, Namespace):
    """UML Class: classifier with attributes, operations and methods; may
    also own nested classifiers and state machines."""

    def __init__(self, xmi_id, name):
        self.validOwnedElements = [Class, Interface, StateMachine,
                                   ActivityGraph]
        self.validFeatures = [Attribute, Operation, Method]
        Classifier.__init__(self, xmi_id, name)
        Namespace.__init__(self, xmi_id, name)
class Feature(ModelElement):
    """Abstract UML Feature: attribute/operation/method owned by a classifier."""

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        self['visibility'] = (None, [u''.__class__])
        self['ownerScope'] = (None, [u''.__class__])

class Attribute(Feature):
    """UML Attribute of a classifier."""

    def __init__(self, xmi_id, name):
        Feature.__init__(self, xmi_id, name)
        # xmi.idref DataType | Class | Interface
        self['type'] = (None, [u''.__class__])
        # changeable | frozen | addOnly
        self['changeability'] = (None, [u''.__class__])
        self['initialValue'] = (None, [Expression])
        self['multiplicity'] = (None, [Multiplicity])

class Multiplicity(ModelElement):
    """UML Multiplicity: wraps a single MultiplicityRange."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['range'] = (None, [MultiplicityRange])

class MultiplicityRange(ModelElement):
    """UML MultiplicityRange: lower/upper bounds of a multiplicity."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['lower'] = (None, [u''.__class__])
        self['upper'] = (None, [u''.__class__])
class Operation(Feature):
    """UML Operation: the signature of a behaviour of a classifier."""

    def __init__(self, xmi_id, name):
        Feature.__init__(self, xmi_id, name)
        self['isAbstract'] = (None, [u''.__class__])
        self['isQuery'] = (None, [u''.__class__])
        self['isLeaf'] = (None, [u''.__class__])
        self['parameter'] = ([], [Parameter])

class Parameter(ModelElement):
    """UML Parameter of an operation."""

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        # in | out | inout | return
        self['kind'] = (None, [u''.__class__])
        # xmi.idref DataType | Class | Interface
        self['type'] = (None, [u''.__class__])
        self['defaultValue'] = (None, [Expression])

class Method(Feature):
    """UML Method: the implementation (body) of an operation."""

    def __init__(self, xmi_id, name=None):
        Feature.__init__(self, xmi_id, name)
        self['body'] = (None, [ProcedureExpression])
        self['specification'] = (None, [u''.__class__])

class Expression(ModelElement):
    """UML Expression: a body string in some language."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['body'] = (None, [u''.__class__])
        self['language'] = (None, [u''.__class__])

class ProcedureExpression(Expression):
    """UML ProcedureExpression: an expression evaluating to a procedure."""
    pass
class Interface(Classifier, Namespace):
    """UML Interface: classifier whose only features are operations."""

    def __init__(self, xmi_id, name):
        self.validFeatures = [Operation]
        self.validOwnedElements = [Class, Interface]
        Classifier.__init__(self, xmi_id, name)
        Namespace.__init__(self, xmi_id, name)

class Generalization(ModelElement):
    """UML Generalization between a child and a parent classifier."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        # Note: child and parent must be reference identifiers to the
        # same kind of element
        self['child'] = (None, [u''.__class__]) #xmi.idref Classifier
        self['parent'] = (None, [u''.__class__]) #xmi.idref Classifier

# UML Association
class Association(ModelElement):
    """UML Association: a relationship made of AssociationEnds."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['connection'] = ({}, [AssociationEnd])

class AssociationEnd(ModelElement):
    """UML AssociationEnd: one endpoint of an association."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['multiplicity'] = (None, [Multiplicity])
        # xmi.idref Class | Interface
        self['participant'] = (None, [u''.__class__])
        self['visibility'] = (None, [u''.__class__])
        self['changeability'] = (None, [u''.__class__])
        self['isNavigable'] = (None, [u''.__class__]) # true | false
        self['targetScope'] = (None, [u''.__class__]) # classifier | instance
        # composite | aggregate | none
        self['aggregation'] = (None, [u''.__class__])

class Dependency(ModelElement):
    """UML Dependency between a client and a supplier element."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['client'] = (None, [u''.__class__]) # xmi.idref Class | Interface
        self['supplier'] = (None, [u''.__class__]) # xmi.idref Class | Interface

# UML Abstraction. This element lives here because, with the <<realize>>
# stereotype, it becomes an interface realization.
class Abstraction(Dependency):
    """UML Abstraction: dependency relating elements at different levels."""
    pass
class CallEvent(ModelElement):
    """UML CallEvent: the event of an operation being called."""

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        # Note: are these formal or actual parameters?
        # According to the UML specification they are formal parameters,
        # so it is not necessary
        self['operation'] = (None, [u''.__class__]) # xmi.idref Operation

class TimeEvent(ModelElement):
    """UML TimeEvent: fires when its time expression is satisfied."""

    def __init__(self, xmi_id, name):
        ModelElement.__init__(self, xmi_id, name)
        self['when'] = (None, [TimeExpression])

class TimeExpression(Expression):
    """UML TimeExpression: an expression resolving to a point in time."""
    pass

# UML state machine
class StateMachine(ModelElement):
    """UML StateMachine: behaviour as states and transitions for a context class."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['context'] = (None, [u''.__class__]) # xmi.idref to Class
        self['top'] = (None, [CompositeState])
        self['transition'] = ({}, [Transition])
class StateVertex(ModelElement):
    """Abstract UML StateVertex: a node in a state machine graph."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['incoming'] = ([], [u''.__class__]) # xmi.idref to Transition
        self['outgoing'] = ([], [u''.__class__]) # xmi.idref to Transition

class Pseudostate(StateVertex):
    """UML Pseudostate (initial, choice, fork, ...); see 'kind'."""

    def __init__(self, xmi_id, name=None):
        StateVertex.__init__(self, xmi_id, name)
        self['kind'] = (None, [u''.__class__])

class State(StateVertex):
    """Abstract UML State: a vertex with entry/do/exit actions."""

    def __init__(self, xmi_id, name=None):
        StateVertex.__init__(self, xmi_id, name)
        actions = [ActionSequence, UninterpretedAction, ReturnAction]
        self['entry'] = (None, actions)
        self['doActivity'] = (None, actions)
        self['exit'] = (None, actions)

class CompositeState(State):
    """UML CompositeState: a state containing sub-vertices."""

    def __init__(self, xmi_id, name=None):
        self.validSubvertex = [CompositeState, SimpleState, FinalState,
                               Pseudostate, ActionState]
        State.__init__(self, xmi_id, name)
        self['subvertex'] = ({}, self.validSubvertex)

class SimpleState(State):
    """UML SimpleState: a state without substructure."""

    def __init__(self, xmi_id, name):
        # This forces these states to have a name
        State.__init__(self, xmi_id, name)

class FinalState(State):
    """UML FinalState: a state that ends the enclosing region."""
    pass
# UML Transition
class Transition(ModelElement):
    """UML Transition between two state vertices, with trigger/guard/effect."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        # xmi.idref to Pseudostate or SimpleState
        self['source'] = (None, [u''.__class__])
        # xmi.idref to SimpleState, FinalState or Pseudostate
        self['target'] = (None, [u''.__class__])
        # xmi.idref to CallEvent or TimeEvent
        self['trigger'] = (None, [u''.__class__])
        self['effect'] = (None, [ActionSequence, UninterpretedAction])
        self['guard'] = (None, [Guard])

class Guard(ModelElement):
    """UML Guard: boolean condition attached to a transition."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['expression'] = (None, [BooleanExpression])

# Boolean expression
class BooleanExpression(Expression):
    """UML BooleanExpression."""
    pass

# Action
class Action(ModelElement):
    """Abstract UML Action with an attached script expression."""

    def __init__(self, xmi_id, name=None):
        ModelElement.__init__(self, xmi_id, name)
        self['script'] = (None, [ActionExpression])

# Action sequence.
# The action sequence is itself an action, but we are not
# interested in having a single expression associated with it
class ActionSequence(Action):
    """UML ActionSequence: an ordered list of actions, itself an action."""

    def __init__(self, xmi_id, name=None):
        Action.__init__(self, xmi_id, name)
        self['action'] = ([], [ActionSequence, UninterpretedAction,
                               ReturnAction])

# Generic action. It represents a statement in some language
class UninterpretedAction(Action):
    """UML UninterpretedAction: an opaque statement."""
    pass

# Return action.
class ReturnAction(Action):
    """UML ReturnAction: returns a value to the caller."""
    pass

# Action expression
class ActionExpression(Expression):
    """UML ActionExpression."""
    pass

# Activity graph
class ActivityGraph(StateMachine):
    """UML ActivityGraph: a state machine specialised for activity modelling."""
    pass

# Action state
class ActionState(SimpleState):
    """UML ActionState: a simple state that executes an action."""
    pass
| gpl-2.0 |
naparuba/kunai | data/global-configuration/packs/mongodb/collectors/bson/int64.py | 69 | 1056 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A BSON wrapper for long (int in python3)"""
from bson.py3compat import PY3
# On Python 3 the builtin ``long`` no longer exists; alias it to ``int``
# so that Int64 below can subclass it on both major versions.
if PY3:
    long = int
class Int64(long):
    """Representation of the BSON int64 type.

    This is necessary because every integral number is an :class:`int` in
    Python 3. Small integral numbers are encoded to BSON int32 by default,
    but Int64 numbers will always be encoded to BSON int64.

    :Parameters:
      - `value`: the numeric value to represent
    """
    # BSON element type byte for int64 (0x12 == 18) -- see the BSON spec.
    _type_marker = 18
| mit |
rainbowintheshell/gnome-dvb-daemon | client/gnomedvb/ui/timers/EditTimersDialog.py | 3 | 11742 | # -*- coding: utf-8 -*-
# Copyright (C) 2008,2009 Sebastian Pölsterl
#
# This file is part of GNOME DVB Daemon.
#
# GNOME DVB Daemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GNOME DVB Daemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNOME DVB Daemon. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
from gi.repository import Gtk
from gnomedvb import _
import datetime
import gnomedvb
from gnomedvb import global_error_handler
from gnomedvb.ui.timers.MessageDialogs import TimerFailureDialog
from gnomedvb.ui.timers.TimerDialog import TimerDialog
from gnomedvb.ui.widgets.CellRendererDatetime import CellRendererDatetime
class EditTimersDialog(Gtk.Dialog):
    """Dialog displaying and managing the recording schedule of a device group.

    Timers of the group are shown in a sortable list; they can be added,
    edited and deleted through the recorder's asynchronous interface.
    A record icon marks timers belonging to an active recording.
    """

    # Column indices of self.timerslist (the backing Gtk.ListStore).
    (COL_ID,
    COL_CHANNEL,
    COL_TITLE,
    COL_START,
    COL_DURATION,
    COL_ACTIVE,) = list(range(6))

    def __init__(self, device_group, parent=None):
        """
        @param device_group: ID of device group
        @type device_group: int
        @param parent: Parent window
        @type parent: Gtk.Window
        """
        Gtk.Dialog.__init__(self, title=_("Recording schedule"),
            parent=parent)

        self.recorder = None
        self.set_modal(True)
        self.set_destroy_with_parent(True)
        self.device_group = device_group
        self.set_recorder(device_group)

        close_button = self.add_button(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)
        close_button.grab_default()

        self.set_default_size(550, 400)
        self.set_border_width(5)

        self.main_box = Gtk.Box(spacing=12)
        self.main_box.set_border_width(5)
        self.main_box.show()
        self.get_content_area().pack_start(self.main_box, True, True, 0)

        # Columns: id, channel name, title, start (datetime), duration
        # (minutes), recording-active flag.
        self.timerslist = Gtk.ListStore(int, str, str, GObject.TYPE_PYOBJECT, int, bool)
        self.timerslist.set_sort_func(self.COL_START,
            self._datetime_sort_func)

        self.timersview = Gtk.TreeView.new_with_model(self.timerslist)
        self.timersview.get_selection().connect("changed",
            self._on_timers_selection_changed)

        # Channel column carries both the record icon and the channel name.
        col_channel = Gtk.TreeViewColumn(_("Channel"))
        cell_rec = Gtk.CellRendererPixbuf()
        col_channel.pack_start(cell_rec, True)
        col_channel.set_cell_data_func(cell_rec, self._get_recording_icon_for_cell, None)
        col_channel.add_attribute(cell_rec, "stock-id", self.COL_ACTIVE)

        cell_channel = Gtk.CellRendererText()
        col_channel.pack_start(cell_channel, True)
        col_channel.add_attribute(cell_channel, "text", self.COL_CHANNEL)
        self.timersview.append_column(col_channel)

        col_title = Gtk.TreeViewColumn(_("Title"))
        cell_title = Gtk.CellRendererText()
        col_title.pack_start(cell_title, True)
        col_title.add_attribute(cell_title, "text", self.COL_TITLE)
        self.timersview.append_column(col_title)

        cell_starttime = CellRendererDatetime()
        col_starttime = Gtk.TreeViewColumn(_("Start time"))
        col_starttime.pack_start(cell_starttime, True)
        col_starttime.add_attribute(cell_starttime, "datetime", self.COL_START)
        self.timersview.append_column(col_starttime)

        cell_duration = Gtk.CellRendererText()
        col_duration = Gtk.TreeViewColumn(_("Duration"))
        col_duration.pack_start(cell_duration, False)
        col_duration.set_cell_data_func(cell_duration, self._get_duration_data, None)
        self.timersview.append_column(col_duration)

        self.scrolledwindow = Gtk.ScrolledWindow()
        self.scrolledwindow.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.scrolledwindow.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        self.scrolledwindow.add(self.timersview)
        self.main_box.pack_start(self.scrolledwindow, True, True, 0)

        self.buttonbox = Gtk.ButtonBox(orientation=Gtk.Orientation.VERTICAL)
        self.buttonbox.set_spacing(6)
        self.buttonbox.set_layout(Gtk.ButtonBoxStyle.START)

        self.button_add = Gtk.Button(stock=Gtk.STOCK_ADD)
        self.button_add.connect("clicked", self._on_button_add_clicked)
        self.buttonbox.pack_start(self.button_add, True, True, 0)

        # Delete/Edit start disabled; enabled once a row is selected.
        self.button_delete = Gtk.Button(stock=Gtk.STOCK_DELETE)
        self.button_delete.connect("clicked", self._on_button_delete_clicked)
        self.button_delete.set_sensitive(False)
        self.buttonbox.pack_start(self.button_delete, True, True, 0)

        self.button_edit = Gtk.Button(stock=Gtk.STOCK_EDIT)
        self.button_edit.connect("clicked", self._on_button_edit_clicked)
        self.button_edit.set_sensitive(False)
        self.buttonbox.pack_start(self.button_edit, True, True, 0)

        self.main_box.pack_start(self.buttonbox, False, False, 0)

        self.get_timers()
        self.show_all()

    def set_recorder(self, dev_group):
        """Fetch the recorder of *dev_group* and subscribe to its signals."""
        self.recorder = dev_group.get_recorder()
        self.recorder.connect("changed", self._on_recorder_changed)
        self.recorder.connect("recording-started", self._set_recording_state, True)
        self.recorder.connect("recording-finished", self._set_recording_state, False)

    def get_timers(self):
        """Asynchronously fetch all timer IDs and populate the list store."""
        def add_timer(proxy, timers, user_data):
            for timer_id in timers:
                self._add_timer(timer_id)
        self.recorder.get_timers(result_handler=add_timer, error_handler=global_error_handler)

    def _add_timer(self, timer_id):
        # Query the timer's details from the recorder and append a row.
        start_list, success = self.recorder.get_start_time(timer_id)
        if success:
            starttime = datetime.datetime(*start_list)
            # First tuple element is skipped (presumably the id) -- the rest
            # is duration, active flag, channel and title.
            (duration, active, channel, title) = self.recorder.get_all_informations(timer_id)[0][1:]
            self.timerslist.append([int(timer_id), channel, title, starttime, duration, bool(active)])

    def _remove_timer(self, timer_id):
        # Drop the row(s) whose ID column matches timer_id.
        for row in self.timerslist:
            if row[self.COL_ID] == timer_id:
                self.timerslist.remove(row.iter)

    def _on_button_delete_clicked(self, button):
        """Delete the selected timer, asking first if it is recording."""
        def delete_timer_callback(proxy, success, user_data):
            if not success:
                error_dialog = Gtk.MessageDialog(parent=self,
                    flags=Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT,
                    type=Gtk.MessageType.ERROR, buttons=Gtk.ButtonsType.OK)
                error_dialog.set_markup(
                    "<big><span weight=\"bold\">%s</span></big>" % _("Timer could not be deleted"))
                error_dialog.run()
                error_dialog.destroy()

        model, aiter = self.timersview.get_selection().get_selected()
        if aiter is not None:
            timer_id = model[aiter][self.COL_ID]
            if self.recorder.is_timer_active(timer_id):
                # The timer is currently recording: ask for confirmation
                # before aborting the recording.
                dialog = Gtk.MessageDialog(parent=self,
                    flags=Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT,
                    type=Gtk.MessageType.QUESTION, buttons=Gtk.ButtonsType.YES_NO)
                dialog.set_markup(
                    "<big><span weight=\"bold\">%s</span></big>" % _("Abort active recording?"))
                dialog.format_secondary_text(
                    _("The timer you selected belongs to a currently active recording.") + " " +
                    _("Deleting this timer will abort the recording."))
                response = dialog.run()
                dialog.destroy()
                if response == Gtk.ResponseType.YES:
                    self.recorder.delete_timer(timer_id,
                        result_handler=delete_timer_callback,
                        error_handler=global_error_handler)
            else:
                self.recorder.delete_timer(timer_id,
                    result_handler=delete_timer_callback,
                    error_handler=global_error_handler)

    def _on_button_add_clicked(self, button):
        """Open the timer dialog and schedule a new recording."""
        def add_timer_callback(proxy, data, user_data):
            rec_id, success = data
            if not success:
                err_dialog = TimerFailureDialog(self)
                err_dialog.run()
                err_dialog.destroy()

        dialog = TimerDialog(self, self.device_group)
        response_id = dialog.run()
        if response_id == Gtk.ResponseType.ACCEPT:
            duration = dialog.get_duration()
            start = dialog.get_start_time()
            channel = dialog.get_channel()
            # start is (year, month, day, hour, minute).
            self.recorder.add_timer(channel, start[0],
                start[1], start[2], start[3], start[4], duration,
                result_handler=add_timer_callback,
                error_handler=global_error_handler)
        dialog.destroy()

    def _on_button_edit_clicked(self, button):
        """Edit start time and duration of the selected timer."""
        model, aiter = self.timersview.get_selection().get_selected()
        if aiter is not None:
            start = model[aiter][self.COL_START]
            duration = model[aiter][self.COL_DURATION]
            channel = model[aiter][self.COL_CHANNEL]
            dialog = TimerDialog(self, self.device_group, channel=channel,
                starttime=start, duration=duration)
            # The start time of a running recording cannot be changed.
            dialog.set_time_and_date_editable(
                not model[aiter][self.COL_ACTIVE])
            response_id = dialog.run()
            if response_id == Gtk.ResponseType.ACCEPT:
                timer_id = model[aiter][self.COL_ID]
                new_duration = dialog.get_duration()
                new_start = dialog.get_start_time()
                self.recorder.set_start_time(timer_id, new_start[0],
                    new_start[1], new_start[2], new_start[3], new_start[4])
                self.recorder.set_duration(timer_id, new_duration)
            dialog.destroy()

    def _on_recorder_changed(self, recorder, timer_id, typeid):
        """Keep the list store in sync with recorder-side timer changes."""
        if recorder == self.recorder:
            if (typeid == 0):
                # Timer added
                self._add_timer(timer_id)
            elif (typeid == 1):
                # Timer deleted
                self._remove_timer(timer_id)
            elif (typeid == 2):
                # Timer changed: re-query by removing and re-adding the row.
                self._remove_timer(timer_id)
                self._add_timer(timer_id)

    def _on_timers_selection_changed(self, treeselection):
        # Delete/Edit only make sense while a row is selected.
        model, aiter = treeselection.get_selected()
        if aiter is None:
            self.button_delete.set_sensitive(False)
            self.button_edit.set_sensitive(False)
        else:
            self.button_delete.set_sensitive(True)
            self.button_edit.set_sensitive(True)

    def _set_recording_state(self, recorder, timer_id, state):
        # Signal handler: flag/unflag the timer's row as actively recording.
        for row in self.timerslist:
            if row[self.COL_ID] == timer_id:
                self.timerslist[row.iter][self.COL_ACTIVE] = bool(state)

    def _get_recording_icon_for_cell(self, column, cell, model, aiter, user_data):
        # Show a record icon only for rows with an active recording.
        if model[aiter][self.COL_ACTIVE]:
            cell.set_property("stock-id", Gtk.STOCK_MEDIA_RECORD)

    def _get_duration_data(self, column, cell, model, aiter, user_data):
        # The store holds minutes but the formatter needs seconds.
        duration = model[aiter][self.COL_DURATION] * 60
        duration_str = gnomedvb.seconds_to_time_duration_string(duration)
        cell.set_property("text", duration_str)

    def _datetime_sort_func(self, treemodel, iter1, iter2, user_data=None):
        """Sort callback for the start-time column.

        Returns a negative, zero or positive integer (``cmp`` semantics).
        Fix: the original returned ``cmp(d1, d2)``, but ``cmp`` was removed
        in Python 3 (which this file targets via ``gi.repository``), so the
        first click on the column header raised NameError. ``user_data`` is
        accepted with a default because PyGObject passes it to sort funcs.
        """
        d1 = treemodel[iter1][self.COL_START]
        d2 = treemodel[iter2][self.COL_START]
        return (d1 > d2) - (d1 < d2)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.