| repo_name (string, length 5–100) | path (string, length 4–294) | copies (990 classes) | size (string, length 4–7) | content (string, length 666–1M) | license (15 classes) |
|---|---|---|---|---|---|
class Solution:
    # @param board, a 2D array of 'X'/'O' characters
    # Capture all regions by modifying the input board in-place.
    # Do not return any value.
    def solve(self, board):
        """Flip every 'O' region not connected to the border to 'X'.

        Border-connected 'O' cells are found with a BFS and temporarily
        marked 'Y' so they survive the final capture pass.
        """
        n = len(board)
        if n == 0:
            return
        m = len(board[0])
        if m == 0:
            return
        self.n = n
        self.m = m
        # Go through the four edges to search O's: those regions escape capture.
        for i in range(n):
            for j in range(m):
                if i == 0 or i == n - 1 or j == 0 or j == m - 1:
                    if board[i][j] == 'O':
                        self.bfs(board, j, i)
        # Second pass: capture surrounded O's, restore protected Y's back to O.
        for i in range(n):
            for j in range(m):
                if board[i][j] == 'O':
                    board[i][j] = 'X'
                if board[i][j] == 'Y':
                    board[i][j] = 'O'

    def bfs(self, board, x, y):
        """Use BFS to set every 'O' reachable from (x, y) to 'Y'."""
        # local import keeps the class self-contained; deque gives O(1)
        # popleft where list.pop(0) was O(n) per dequeue
        from collections import deque
        queue = deque()
        board[y][x] = 'Y'
        queue.append((x, y))
        while queue:
            root_x, root_y = queue.popleft()
            for node_x, node_y in self.adjacent(board, root_x, root_y):
                if board[node_y][node_x] != 'Y':
                    board[node_y][node_x] = 'Y'
                    queue.append((node_x, node_y))

    def adjacent(self, board, x, y):
        """Return the 4-neighbours of (x, y) that still contain 'O'.

        Bounds checks use >= 0: the original `x - 1 > 0` / `y - 1 > 0`
        skipped row/column 0, which was only harmless because border cells
        are seeded separately in solve().
        """
        res = []
        if x + 1 < self.m and board[y][x + 1] == 'O':
            res.append((x + 1, y))
        if x - 1 >= 0 and board[y][x - 1] == 'O':
            res.append((x - 1, y))
        if y + 1 < self.n and board[y + 1][x] == 'O':
            res.append((x, y + 1))
        if y - 1 >= 0 and board[y - 1][x] == 'O':
            res.append((x, y - 1))
        return res
| bsd-2-clause |
xsuchy/ordered-set | test.py | 2 | 1338 | from nose.tools import eq_
from ordered_set import OrderedSet
import pickle
def test_pickle():
    """An OrderedSet survives a pickle round-trip unchanged."""
    original = OrderedSet('abracadabra')
    restored = pickle.loads(pickle.dumps(original))
    assert restored == original
def test_empty_pickle():
    """Pickling also works for the empty OrderedSet."""
    empty = OrderedSet()
    assert pickle.loads(pickle.dumps(empty)) == empty
def test_order():
    """Duplicates are dropped while first-seen order is preserved."""
    oset = OrderedSet('abracadabra')
    eq_(len(oset), 5)
    eq_(oset, OrderedSet(['a', 'b', 'r', 'c', 'd']))
    eq_(list(reversed(oset)), ['d', 'c', 'r', 'b', 'a'])
def test_binary_operations():
    """Set algebra (&, |, -) keeps the insertion order of the operands."""
    left = OrderedSet('abracadabra')
    right = OrderedSet('simsalabim')
    assert left != right
    eq_(left & right, OrderedSet(['a', 'b']))
    eq_(left | right, OrderedSet(['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l']))
    eq_(left - right, OrderedSet(['r', 'c', 'd']))
def test_indexing():
    """Indexing supports slices, index lists, and reverse lookup."""
    oset = OrderedSet('abracadabra')
    eq_(oset[:], oset)
    eq_(oset.copy(), oset)
    # a full slice returns the same object; copy() returns a new one
    assert oset[:] is oset
    assert oset.copy() is not oset
    eq_(oset[[1, 2]], OrderedSet(['b', 'r']))
    eq_(oset[1:3], OrderedSet(['b', 'r']))
    eq_(oset.index('b'), 1)
    eq_(oset.index(('b', 'r')), [1, 2])
    try:
        oset.index('br')
    except KeyError:
        pass
    else:
        raise AssertionError("Looking up a nonexistent key should be a KeyError")
| mit |
anas-taji/purchase-workflow | __unported__/purchase_group_hooks/__openerp__.py | 13 | 2003 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: purely declarative metadata, no executable logic.
{'name': 'Add hooks to the merge PO feature.',
'version': '0.1',
'author': "Camptocamp,Odoo Community Association (OCA)",
'maintainer': 'Camptocamp',
'category': 'Purchase Management',
'complexity': "normal",
# only the core purchase addon is required
'depends': ['purchase'],
'description': """
In the core OpenERP purchase module, there is a wizard to merge purchase
orders. That feature is convenient, but as soon as a field is added to the
purchase order, it does not work anymore and needs to be patched.
The original implementation does not provide any hooks for extension, and
modules can only reimplement a method completely. This required a lot of copy
and paste, and worse, it breaks if two modules attempt to do that.
Therefore, this module reimplements the feature, with the same basic result
in the standard case. Hooks are provided for extra modules that add fields
or change the logic.
""",
'website': 'http://www.camptocamp.com/',
'data': [],
# not installable: the module lives under __unported__ awaiting migration
'installable': False,
'auto_install': False,
'license': 'AGPL-3',
'application': False,
'test': ['test/merge_order.yml'],
}
| agpl-3.0 |
elky/django | tests/forms_tests/field_tests/test_datetimefield.py | 98 | 5103 | import datetime
from django.forms import DateTimeField, ValidationError
from django.test import SimpleTestCase
class DateTimeFieldTest(SimpleTestCase):
    """Tests for forms.DateTimeField.clean() parsing and validation."""

    def test_datetimefield_1(self):
        # Default input formats: date/datetime objects plus ISO and US strings.
        f = DateTimeField()
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
        self.assertEqual(
            datetime.datetime(2006, 10, 25, 14, 30, 59),
            f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59))
        )
        self.assertEqual(
            datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
            f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200))
        )
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
            f.clean('hello')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
            f.clean('2006-10-25 4:30 p.m.')

    def test_datetimefield_2(self):
        # Custom input_formats replace the defaults entirely.
        f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
        self.assertEqual(
            datetime.datetime(2006, 10, 25, 14, 30, 59),
            f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59))
        )
        self.assertEqual(
            datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
            f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200))
        )
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
        # ISO strings no longer match once custom formats are given.
        with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
            f.clean('2006-10-25 14:30:45')

    def test_datetimefield_3(self):
        # required=False: None and '' both clean to None.
        f = DateTimeField(required=False)
        self.assertIsNone(f.clean(None))
        self.assertEqual('None', repr(f.clean(None)))
        self.assertIsNone(f.clean(''))
        self.assertEqual('None', repr(f.clean('')))

    def test_datetimefield_4(self):
        f = DateTimeField()
        # Test whitespace stripping behavior (#5714)
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
        self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
        # Whitespace-only input is invalid, not empty.
        with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
            f.clean(' ')

    def test_datetimefield_5(self):
        # %f accepts fractional seconds shorter than six digits.
        f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
        self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))

    def test_datetimefield_changed(self):
        # has_changed() compares the initial value rendered via input_formats.
        format = '%Y %m %d %I:%M %p'
        f = DateTimeField(input_formats=[format])
        d = datetime.datetime(2006, 9, 17, 14, 30, 0)
        self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
| bsd-3-clause |
zuosc/PythonCode | 8mapandreduce.py | 1 | 3202 | # -*- coding:utf8 -*-
# Power by zuosc 2016-10-12
#map和reduce
def f(x):
    """Return the square of x."""
    return x * x

# map() applies f lazily over the sequence; list() forces evaluation.
squares = map(f, [1, 2, 3, 4, 5, 6, 7])
print(list(squares))
print('---------------------------------')
# A builtin such as str works as the mapped function too: numbers -> strings.
print(list(map(str, [1, 2, 3, 4, 5, 6, 7, 8, 9])))
print('----------------------------------')
def add(x, y):
    """Return the sum of x and y."""
    return x + y

from functools import reduce

# reduce() folds the sequence left to right: ((((1 + 2) + 1) + 2) + 2) == 8
r = reduce(add, [1, 2, 1, 2, 2])
print(r)
print('----------------------------')

def fn(x, y):
    """Shift the accumulator one decimal digit left and append y."""
    return x * 10 + y

# digits [1, 2, 3, 9] fold into the number 1239
r = reduce(fn, [1, 2, 3, 9])
print(r)
print('---------------------------')
def fn(x, y):
    """Accumulate decimal digits: shift left one place, then add y."""
    return x * 10 + y

def char3num(s):
    """Map a single digit character '0'-'9' to its integer value."""
    digit_map = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                 '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
    return digit_map[s]

# '44885' -> 44885: map each character to an int, then fold with fn.
r = reduce(fn, map(char3num, '44885'))
print(r)
#################################################################
def str2int(s):
    """Convert a string of decimal digits to an int using map/reduce."""
    def fold(x, y):
        # shift the running value one decimal place and append digit y
        return x * 10 + y

    def char2num(c):
        # single digit character -> int
        return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[c]

    # equivalent lambda form:
    # return reduce(lambda x, y: x * 10 + y, map(char2num, s))
    return reduce(fold, map(char2num, s))

print(str2int('123123123123'))

def prod(L):
    """Return the product of the numbers in L."""
    return reduce(lambda x, y: x * y, L)

print('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))
# Exercise (3):
# Use map and reduce to write str2float, turning the string '123.456'
# into the float 123.456.
def str2float(s):
    """Convert a decimal string such as '123.456' to a float."""
    # split on '.' into the integer and fractional digit strings
    int_digits, frac_digits = s.split('.')
    # number of fractional digits decides the final scaling
    frac_len = len(frac_digits)

    def char2int(c):
        # single digit character -> int
        return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[c]

    def to_int(chars):
        # fold a digit-character sequence into its integer value
        return reduce(lambda x, y: x * 10 + y, map(char2int, chars))

    left = to_int(int_digits)
    right = to_int(frac_digits)
    # NOTE: under Python 2, integer division `right / 10 ** frac_len`
    # would truncate to 0; Python 3 true division yields the fraction.
    return left + right / 10 ** frac_len

print(str2float('123.456'))
print(type(str2float('123.456')))
def str2float(s):
    """Convert a '123.456'-style string to a float (compact second variant)."""
    whole, frac = s.split('.')
    shift = len(frac)

    def char2int(c):
        # digit character -> integer value
        return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[c]

    fold = lambda x, y: x * 10 + y
    integer_part = reduce(fold, map(char2int, whole))
    fraction_part = reduce(fold, map(char2int, frac))
    return integer_part + fraction_part / 10 ** shift

print(str2float('1454654.1524814'))
| mit |
flyfei/python-for-android | python3-alpha/python3-src/Lib/lib2to3/tests/test_parser.py | 47 | 6147 | """Test suite for 2to3's parser and grammar files.
This is the place to add tests for changes to 2to3's grammar, such as those
merging the grammars for Python 2 and 3. In addition to specific tests for
parts of the grammar we've changed, we also make sure we can parse the
test_grammar.py files from both Python 2 and Python 3.
"""
from __future__ import with_statement
# Testing imports
from . import support
from .support import driver, test_dir
# Python imports
import os
# Local imports
from lib2to3.pgen2 import tokenize
from ..pgen2.parse import ParseError
class GrammarTest(support.TestCase):
    """Base class providing parse / must-fail-to-parse helpers."""

    def validate(self, code):
        """Parse *code*; ParseError propagates if the grammar rejects it."""
        support.parse_string(code)

    def invalid_syntax(self, code):
        """Assert that *code* is rejected by the grammar."""
        try:
            self.validate(code)
        except ParseError:
            return
        raise AssertionError("Syntax shouldn't have been valid")
class TestRaiseChanges(GrammarTest):
    """Both the Python 2 and Python 3 raise statement forms must parse."""

    def test_2x_style_1(self):
        self.validate("raise")

    def test_2x_style_2(self):
        # Python 2 "raise E, V" form
        self.validate("raise E, V")

    def test_2x_style_3(self):
        # Python 2 "raise E, V, T" form (with traceback)
        self.validate("raise E, V, T")

    def test_2x_style_invalid_1(self):
        # at most three expressions may follow raise
        self.invalid_syntax("raise E, V, T, Z")

    def test_3x_style(self):
        self.validate("raise E1 from E2")

    def test_3x_style_invalid_1(self):
        # the 2.x tuple form cannot be combined with "from"
        self.invalid_syntax("raise E, V from E1")

    def test_3x_style_invalid_2(self):
        self.invalid_syntax("raise E from E1, E2")

    def test_3x_style_invalid_3(self):
        self.invalid_syntax("raise from E1, E2")

    def test_3x_style_invalid_4(self):
        self.invalid_syntax("raise E from")
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
    """Parameter/return annotations (including 2.x tuple params) must parse."""

    def test_1(self):
        self.validate("""def f(x) -> list: pass""")

    def test_2(self):
        self.validate("""def f(x:int): pass""")

    def test_3(self):
        self.validate("""def f(*x:str): pass""")

    def test_4(self):
        self.validate("""def f(**x:float): pass""")

    def test_5(self):
        # annotations may be arbitrary expressions
        self.validate("""def f(x, y:1+2): pass""")

    def test_6(self):
        # Python 2 tuple parameters combined with annotations
        self.validate("""def f(a, (b:1, c:2, d)): pass""")

    def test_7(self):
        self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")

    def test_8(self):
        s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
                *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
        self.validate(s)
class TestExcept(GrammarTest):
    """Both "except E as N" (3.x) and "except E, N" (2.x) must parse."""

    def test_new(self):
        s = """
            try:
                x
            except E as N:
                y"""
        self.validate(s)

    def test_old(self):
        # legacy Python 2 comma form
        s = """
            try:
                x
            except E, N:
                y"""
        self.validate(s)
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
class TestSetLiteral(GrammarTest):
    """Set literal syntax {a, b, ...} must parse."""

    def test_1(self):
        self.validate("""x = {'one'}""")

    def test_2(self):
        # trailing comma is allowed
        self.validate("""x = {'one', 1,}""")

    def test_3(self):
        self.validate("""x = {'one', 'two', 'three'}""")

    def test_4(self):
        self.validate("""x = {2, 3, 4,}""")
class TestNumericLiterals(GrammarTest):
    """New-style 0o / 0b numeric literals must parse; invalid digits must not."""

    def test_new_octal_notation(self):
        self.validate("""0o7777777777777""")
        # 8 is not an octal digit
        self.invalid_syntax("""0o7324528887""")

    def test_new_binary_notation(self):
        self.validate("""0b101010""")
        # 2 is not a binary digit
        self.invalid_syntax("""0b0101021""")
class TestClassDef(GrammarTest):
    """Keyword/star arguments in class bases (Python 3 metaclass syntax) must parse."""

    def test_new_syntax(self):
        self.validate("class B(t=7): pass")
        self.validate("class B(t, *args): pass")
        self.validate("class B(t, **kwargs): pass")
        self.validate("class B(t, *args, **kwargs): pass")
        self.validate("class B(t, y=9, *args, **kwargs): pass")
class TestParserIdempotency(support.TestCase):

    """A cut-down version of pytree_idempotency.py."""

    def test_all_project_files(self):
        """Round-trip every project file through the parser unchanged."""
        for filepath in support.all_project_files():
            with open(filepath, "rb") as fp:
                encoding = tokenize.detect_encoding(fp.readline)[0]
            self.assertTrue(encoding is not None,
                            "can't detect encoding for %s" % filepath)
            # Read as bytes and decode explicitly. The original opened the
            # file in text mode and then called str.decode(), which raises
            # AttributeError on Python 3.
            with open(filepath, "rb") as fp:
                source = fp.read().decode(encoding)
            tree = driver.parse_string(source)
            new = str(tree)
            # diff() takes (fn, result, encoding); the original call passed
            # only two arguments, a guaranteed TypeError.
            if diff(filepath, new, encoding):
                self.fail("Idempotency failed: %s" % filepath)

    def test_extended_unpacking(self):
        """PEP 3132 starred-assignment targets must parse."""
        driver.parse_string("a, *b, c = x\n")
        driver.parse_string("[*a, b] = x\n")
        driver.parse_string("(z, *y, w) = m\n")
        driver.parse_string("for *z, m in d: pass\n")
class TestLiterals(GrammarTest):
    """Multi-line and escaped bytes/str literals must parse."""

    def validate(self, s):
        # dedent so the indented snippets below parse as top-level code
        driver.parse_string(support.dedent(s) + "\n\n")

    def test_multiline_bytes_literals(self):
        s = """
            md5test(b"\xaa" * 80,
                    (b"Test Using Larger Than Block-Size Key "
                     b"and Larger Than One Block-Size Data"),
                    "6f630fad67cda0ee1fb1f562db3aa53e")
            """
        self.validate(s)

    def test_multiline_bytes_tripquote_literals(self):
        s = '''
            b"""
            <?xml version="1.0" encoding="UTF-8"?>
            <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
            """
            '''
        self.validate(s)

    def test_multiline_str_literals(self):
        s = """
            md5test("\xaa" * 80,
                    ("Test Using Larger Than Block-Size Key "
                     "and Larger Than One Block-Size Data"),
                    "6f630fad67cda0ee1fb1f562db3aa53e")
            """
        self.validate(s)
def diff(fn, result, encoding):
    """Write *result* to a scratch file '@' and run ``diff -u`` against *fn*.

    Returns the diff command's exit status (0 means the files are identical).
    *result* may be str (encoded with *encoding*) or already-encoded bytes.
    """
    if isinstance(result, str):
        result = result.encode(encoding)
    # Binary mode is required: the original opened "@" in text mode and then
    # wrote bytes, which raises TypeError on Python 3. `with` also guarantees
    # the handle is closed before diff reads the file.
    with open("@", "wb") as f:
        f.write(result)
    try:
        fn = fn.replace('"', '\\"')
        # NOTE(review): fn is interpolated into a shell command; quotes are
        # escaped but other shell metacharacters are not — acceptable only
        # because inputs are project file paths, not untrusted data.
        return os.system('diff -u "%s" @' % fn)
    finally:
        os.remove("@")
| apache-2.0 |
brandond/ansible | lib/ansible/plugins/strategy/__init__.py | 4 | 54656 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleParserError, AnsibleUndefinedVariable
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, itervalues, string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.loader import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys
# Module-level singletons: the shared Display used for all console output,
# and the declared public API of this module.
display = Display()

__all__ = ['StrategyBase']
class StrategySentinel:
    # Marker type: an instance is pushed onto the final results queue to tell
    # the background results thread to shut down (see results_thread_main).
    pass
# TODO: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
    '''
    A simple object to make pass the various plugin loaders to
    the forked processes over the queue easier
    '''
    def __init__(self):
        # snapshot the module-level plugin loaders as instance attributes so
        # the whole bundle can be handed to worker processes as one object
        self.action_loader = action_loader
        self.connection_loader = connection_loader
        self.filter_loader = filter_loader
        self.test_loader = test_loader
        self.lookup_loader = lookup_loader
        self.module_loader = module_loader
# Single shared sentinel instance; queued by StrategyBase.cleanup() to stop
# the results thread.
_sentinel = StrategySentinel()
def results_thread_main(strategy):
    """Background loop: drain the strategy's final queue into its results deque.

    Terminates when a StrategySentinel instance arrives or the queue is torn
    down underneath us (IOError/EOFError).
    """
    while True:
        try:
            item = strategy._final_q.get()
            if isinstance(item, StrategySentinel):
                break
            # Condition objects support the context-manager protocol, which
            # is equivalent to explicit acquire()/release().
            with strategy._results_lock:
                strategy._results.append(item)
        except (IOError, EOFError):
            break
        except Queue.Empty:
            pass
def debug_closure(func):
    """Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
    @functools.wraps(func)
    def inner(self, iterator, one_pass=False, max_passes=None):
        # maps TaskResult predicate names to the TQM stats buckets that must
        # be rolled back if the user chooses to redo a task
        status_to_stats_map = (
            ('is_failed', 'failures'),
            ('is_unreachable', 'dark'),
            ('is_changed', 'changed'),
            ('is_skipped', 'skipped'),
        )
        # We don't know the host yet, copy the previous states, for lookup after we process new results
        prev_host_states = iterator._host_states.copy()
        results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
        _processed_results = []
        for result in results:
            task = result._task
            host = result._host
            # pop the queued-task info cached by _queue_task for this result
            _queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
            task_vars = _queued_task_args['task_vars']
            play_context = _queued_task_args['play_context']
            # Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
            try:
                prev_host_state = prev_host_states[host.name]
            except KeyError:
                prev_host_state = iterator.get_host_state(host)
            # NOTE(review): NextAction and Debugger are presumably imported
            # elsewhere in this module — confirm they resolve at runtime.
            while result.needs_debugger(globally_enabled=self.debugger_active):
                next_action = NextAction()
                dbg = Debugger(task, host, task_vars, play_context, result, next_action)
                dbg.cmdloop()
                if next_action.result == NextAction.REDO:
                    # rollback host state
                    self._tqm.clear_failed_hosts()
                    iterator._host_states[host.name] = prev_host_state
                    for method, what in status_to_stats_map:
                        if getattr(result, method)():
                            self._tqm._stats.decrement(what, host.name)
                    self._tqm._stats.decrement('ok', host.name)
                    # redo
                    self._queue_task(host, task, task_vars, play_context)
                    # re-wrap so the redo's results also pass through the debugger
                    _processed_results.extend(debug_closure(func)(self, iterator, one_pass))
                    break
                elif next_action.result == NextAction.CONTINUE:
                    _processed_results.append(result)
                    break
                elif next_action.result == NextAction.EXIT:
                    # Matches KeyboardInterrupt from bin/ansible
                    sys.exit(99)
            else:
                # while/else: debugger never engaged, keep the result as-is
                _processed_results.append(result)
        return _processed_results
    return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
    """Wire this strategy to the task queue manager and start the results thread."""
    self._tqm = tqm
    self._inventory = tqm.get_inventory()
    self._workers = tqm.get_workers()
    self._variable_manager = tqm.get_variable_manager()
    self._loader = tqm.get_loader()
    self._final_q = tqm._final_q
    # CLI flags controlling stepping / diff output / fact-cache flushing
    self._step = context.CLIARGS.get('step', False)
    self._diff = context.CLIARGS.get('diff', False)
    self.flush_cache = context.CLIARGS.get('flush_cache', False)
    # the task cache is a dictionary of tuples of (host.name, task._uuid)
    # used to find the original task object of in-flight tasks and to store
    # the task args/vars and play context info used to queue the task.
    self._queued_task_cache = {}
    # Backwards compat: self._display isn't really needed, just import the global display and use that.
    self._display = display
    # internal counters
    self._pending_results = 0
    self._cur_worker = 0
    # this dictionary is used to keep track of hosts that have
    # outstanding tasks still in queue
    self._blocked_hosts = dict()
    # this dictionary is used to keep track of hosts that have
    # flushed handlers
    self._flushed_hosts = dict()
    self._results = deque()
    self._results_lock = threading.Condition(threading.Lock())
    # create the result processing thread for reading results in the background
    self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
    self._results_thread.daemon = True
    self._results_thread.start()
    # holds the list of active (persistent) connections to be shutdown at
    # play completion
    self._active_connections = dict()
    self.debugger_active = C.ENABLE_TASK_DEBUGGER
def cleanup(self):
    """Close persistent connections and shut down the background results thread."""
    # close active persistent connections
    for sock in itervalues(self._active_connections):
        try:
            conn = Connection(sock)
            conn.reset()
        except ConnectionError as e:
            # most likely socket is already closed
            display.debug("got an error while closing persistent connection: %s" % e)
    # the sentinel unblocks the results thread's queue.get() so join() returns
    self._final_q.put(_sentinel)
    self._results_thread.join()
def run(self, iterator, play_context, result=0):
    """Run outstanding handlers and compute the final RUN_* result code.

    Returns `result` unchanged if it already signals an error, otherwise a
    TQM RUN_* code reflecting unreachable/failed hosts after handlers ran.
    """
    # execute one more pass through the iterator without peeking, to
    # make sure that all of the hosts are advanced to their final task.
    # This should be safe, as everything should be ITERATING_COMPLETE by
    # this point, though the strategy may not advance the hosts itself.
    [iterator.get_next_task_for_host(host) for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
    # save the failed/unreachable hosts, as the run_handlers()
    # method will clear that information during its execution
    failed_hosts = iterator.get_failed_hosts()
    unreachable_hosts = self._tqm._unreachable_hosts.keys()
    display.debug("running handlers")
    handler_result = self.run_handlers(iterator, play_context)
    if isinstance(handler_result, bool) and not handler_result:
        result |= self._tqm.RUN_ERROR
    elif not handler_result:
        result |= handler_result
    # now update with the hosts (if any) that failed or were
    # unreachable during the handler execution phase
    failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
    unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
    # return the appropriate code, depending on the status hosts after the run
    if not isinstance(result, bool) and result != self._tqm.RUN_OK:
        return result
    elif len(unreachable_hosts) > 0:
        return self._tqm.RUN_UNREACHABLE_HOSTS
    elif len(failed_hosts) > 0:
        return self._tqm.RUN_FAILED_HOSTS
    else:
        return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts)
if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = [h.name for h in self.get_hosts_remaining(play)]
vars['ansible_failed_hosts'] = [h.name for h in self.get_failed_hosts(play)]
def _queue_task(self, host, task, task_vars, play_context):
    ''' handles queueing the task up to be sent to a worker '''
    display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
    # Add a write lock for tasks.
    # Maybe this should be added somewhere further up the call stack but
    # this is the earliest in the code where we have task (1) extracted
    # into its own variable and (2) there's only a single code path
    # leading to the module being run.  This is called by three
    # functions: __init__.py::_do_handler_run(), linear.py::run(), and
    # free.py::run() so we'd have to add to all three to do it there.
    # The next common higher level is __init__.py::run() and that has
    # tasks inside of play_iterator so we'd have to extract them to do it
    # there.
    if task.action not in action_write_locks.action_write_locks:
        display.debug('Creating lock for %s' % task.action)
        action_write_locks.action_write_locks[task.action] = Lock()
    # and then queue the new task
    try:
        # create a dummy object with plugin loaders set as an easier
        # way to share them with the forked processes
        shared_loader_obj = SharedPluginLoaderObj()
        queued = False
        starting_worker = self._cur_worker
        # round-robin over worker slots until a free (or dead) slot is found
        while True:
            worker_prc = self._workers[self._cur_worker]
            if worker_prc is None or not worker_prc.is_alive():
                # remember queueing info so results can be matched back to
                # the original task (see _process_pending_results)
                self._queued_task_cache[(host.name, task._uuid)] = {
                    'host': host,
                    'task': task,
                    'task_vars': task_vars,
                    'play_context': play_context
                }
                worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
                self._workers[self._cur_worker] = worker_prc
                self._tqm.send_callback('v2_runner_on_start', host, task)
                worker_prc.start()
                display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
                queued = True
            self._cur_worker += 1
            if self._cur_worker >= len(self._workers):
                self._cur_worker = 0
            if queued:
                break
            elif self._cur_worker == starting_worker:
                # every slot busy: brief sleep before scanning again
                time.sleep(0.0001)
        self._pending_results += 1
    except (EOFError, IOError, AssertionError) as e:
        # most likely an abort
        display.debug("got an error while queuing: %s" % e)
        return
    display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
else:
host_list = [task_host]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
if host_name is not None:
actual_host = self._inventory.get_host(host_name)
if actual_host is None:
actual_host = Host(name=host_name)
else:
actual_host = Host(name=task.delegate_to)
return [actual_host]
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).

    :arg iterator: the play iterator whose state is updated per result.
    :kwarg one_pass: if True, process at most one queued result.
    :kwarg max_passes: if set, process at most this many queued results.
    :returns: the list of TaskResult objects processed on this call.
    '''
    ret_results = []

    def get_original_host(host_name):
        # Map the (possibly serialized) host name from the worker back to
        # the master inventory's Host object.
        # FIXME: this should not need x2 _inventory
        host_name = to_text(host_name)
        if host_name in self._inventory.hosts:
            return self._inventory.hosts[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks_by_name(handler_name, handler_blocks):
        # iterate in reversed order since last handler loaded with the same name wins
        for handler_block in reversed(handler_blocks):
            for handler_task in handler_block.block:
                if handler_task.name:
                    if not handler_task.cached_name:
                        # Template the handler name once and cache it so we
                        # don't re-render it for every notification.
                        handler_vars = self._variable_manager.get_vars(play=iterator._play, task=handler_task)
                        templar = Templar(loader=self._loader, variables=handler_vars)
                        handler_task.name = templar.template(handler_task.name)
                        handler_task.cached_name = True
                    try:
                        # first we check with the full result of get_name(), which may
                        # include the role name (if the handler is from a role). If that
                        # is not found, we resort to the simple name field, which doesn't
                        # have anything extra added to it.
                        if handler_task.name == handler_name:
                            return handler_task
                        else:
                            if handler_task.get_name() == handler_name:
                                return handler_task
                    except (UndefinedError, AnsibleUndefinedVariable):
                        # We skip this handler due to the fact that it may be using
                        # a variable in the name that was conditionally included via
                        # set_fact or some other method, and we don't want to error
                        # out unnecessarily
                        continue
        return None

    cur_pass = 0
    while True:
        # Pop one result under the lock; an empty deque ends this call.
        try:
            self._results_lock.acquire()
            task_result = self._results.popleft()
        except IndexError:
            break
        finally:
            self._results_lock.release()

        # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
        original_host = get_original_host(task_result._host)
        queue_cache_entry = (original_host.name, task_result._task)
        found_task = self._queued_task_cache.get(queue_cache_entry)['task']
        original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
        original_task._parent = found_task._parent
        original_task.from_attrs(task_result._task_fields)
        task_result._host = original_host
        task_result._task = original_task

        # send callbacks for 'non final' results
        if '_ansible_retry' in task_result._result:
            self._tqm.send_callback('v2_runner_retry', task_result)
            continue
        elif '_ansible_item_result' in task_result._result:
            if task_result.is_failed() or task_result.is_unreachable():
                self._tqm.send_callback('v2_runner_item_on_failed', task_result)
            elif task_result.is_skipped():
                self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
            else:
                if 'diff' in task_result._result:
                    if self._diff or getattr(original_task, 'diff', False):
                        self._tqm.send_callback('v2_on_file_diff', task_result)
                self._tqm.send_callback('v2_runner_item_on_ok', task_result)
            continue

        # 'register:' results are stored as non-persistent facts, with
        # internal keys and the 'invocation' block stripped out first.
        if original_task.register:
            host_list = self.get_task_hosts(iterator, original_host, original_task)
            clean_copy = strip_internal_keys(task_result._result)
            if 'invocation' in clean_copy:
                del clean_copy['invocation']
            for target_host in host_list:
                self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

        # all host status messages contain 2 entries: (msg, task_result)
        role_ran = False
        if task_result.is_failed():
            role_ran = True
            ignore_errors = original_task.ignore_errors
            if not ignore_errors:
                display.debug("marking %s as failed" % original_host.name)
                if original_task.run_once:
                    # if we're using run_once, we have to fail every host here
                    for h in self._inventory.get_hosts(iterator._play.hosts):
                        if h.name not in self._tqm._unreachable_hosts:
                            # NOTE(review): both peeks discard their results
                            # (state/new_task unused) -- presumably only the
                            # side effects of peeking matter here; confirm.
                            state, _ = iterator.get_next_task_for_host(h, peek=True)
                            iterator.mark_host_failed(h)
                            state, new_task = iterator.get_next_task_for_host(h, peek=True)
                else:
                    iterator.mark_host_failed(original_host)

                # grab the current state and if we're iterating on the rescue portion
                # of a block then we save the failed task in a special var for use
                # within the rescue/always
                state, _ = iterator.get_next_task_for_host(original_host, peek=True)

                if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
                    self._tqm._failed_hosts[original_host.name] = True

                if state and iterator.get_active_state(state).run_state == iterator.ITERATING_RESCUE:
                    self._tqm._stats.increment('rescued', original_host.name)
                    self._variable_manager.set_nonpersistent_facts(
                        original_host,
                        dict(
                            ansible_failed_task=original_task.serialize(),
                            ansible_failed_result=task_result._result,
                        ),
                    )
                else:
                    self._tqm._stats.increment('failures', original_host.name)
            else:
                # ignore_errors: the failure counts as ok + ignored instead.
                self._tqm._stats.increment('ok', original_host.name)
                self._tqm._stats.increment('ignored', original_host.name)
                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)
            self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
        elif task_result.is_unreachable():
            ignore_unreachable = original_task.ignore_unreachable
            if not ignore_unreachable:
                self._tqm._unreachable_hosts[original_host.name] = True
                iterator._play._removed_hosts.append(original_host.name)
            else:
                self._tqm._stats.increment('skipped', original_host.name)
                task_result._result['skip_reason'] = 'Host %s is unreachable' % original_host.name
            # 'dark' is incremented whether or not the unreachability is ignored.
            self._tqm._stats.increment('dark', original_host.name)
            self._tqm.send_callback('v2_runner_on_unreachable', task_result)
        elif task_result.is_skipped():
            self._tqm._stats.increment('skipped', original_host.name)
            self._tqm.send_callback('v2_runner_on_skipped', task_result)
        else:
            role_ran = True

            if original_task.loop:
                # this task had a loop, and has more than one result, so
                # loop over all of them instead of a single result
                result_items = task_result._result.get('results', [])
            else:
                result_items = [task_result._result]

            for result_item in result_items:
                if '_ansible_notify' in result_item:
                    if task_result.is_changed():
                        # The shared dictionary for notified handlers is a proxy, which
                        # does not detect when sub-objects within the proxy are modified.
                        # So, per the docs, we reassign the list so the proxy picks up and
                        # notifies all other threads
                        for handler_name in result_item['_ansible_notify']:
                            found = False
                            # Find the handler using the above helper. First we look up the
                            # dependency chain of the current task (if it's from a role), otherwise
                            # we just look through the list of handlers in the current play/all
                            # roles and use the first one that matches the notify name
                            target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
                            if target_handler is not None:
                                found = True
                                if target_handler.notify_host(original_host):
                                    self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)

                            # Also notify any handlers that 'listen' for this name.
                            for listening_handler_block in iterator._play.handlers:
                                for listening_handler in listening_handler_block.block:
                                    listeners = getattr(listening_handler, 'listen', []) or []
                                    if handler_name not in listeners:
                                        continue
                                    else:
                                        found = True

                                    if listening_handler.notify_host(original_host):
                                        self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)

                            # and if none were found, then we raise an error
                            if not found:
                                msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
                                       "handlers list" % handler_name)
                                if C.ERROR_ON_MISSING_HANDLER:
                                    raise AnsibleError(msg)
                                else:
                                    display.warning(msg)

                if 'add_host' in result_item:
                    # this task added a new host (add_host module)
                    new_host_info = result_item.get('add_host', dict())
                    self._add_host(new_host_info, iterator)

                elif 'add_group' in result_item:
                    # this task added a new group (group_by module)
                    self._add_group(original_host, result_item)

                if 'ansible_facts' in result_item:
                    # if delegated fact and we are delegating facts, we need to change target host for them
                    if original_task.delegate_to is not None and original_task.delegate_facts:
                        host_list = self.get_delegated_hosts(result_item, original_task)
                    else:
                        host_list = self.get_task_hosts(iterator, original_host, original_task)

                    if original_task.action == 'include_vars':
                        for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                            # find the host we're actually referring too here, which may
                            # be a host that is not really in inventory at all
                            for target_host in host_list:
                                self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    else:
                        cacheable = result_item.pop('_ansible_facts_cacheable', False)
                        for target_host in host_list:
                            # so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
                            # to avoid issues with precedence and confusion with set_fact normal operation,
                            # we set BOTH fact and nonpersistent_facts (aka hostvar)
                            # when fact is retrieved from cache in subsequent operations it will have the lower precedence,
                            # but for playbook setting it the 'higher' precedence is kept
                            if original_task.action != 'set_fact' or cacheable:
                                self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
                            if original_task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())

                if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:

                    # Custom stats may be recorded per-host (default) or globally.
                    if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
                        host_list = self.get_task_hosts(iterator, original_host, original_task)
                    else:
                        host_list = [None]

                    data = result_item['ansible_stats']['data']
                    aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
                    for myhost in host_list:
                        for k in data.keys():
                            if aggregate:
                                self._tqm._stats.update_custom_stats(k, data[k], myhost)
                            else:
                                self._tqm._stats.set_custom_stats(k, data[k], myhost)

            if 'diff' in task_result._result:
                if self._diff or getattr(original_task, 'diff', False):
                    self._tqm.send_callback('v2_on_file_diff', task_result)

            # TaskInclude results get their 'ok' stat later, when the
            # included file is actually loaded (see _load_included_file).
            if not isinstance(original_task, TaskInclude):
                self._tqm._stats.increment('ok', original_host.name)
                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)

            # finally, send the ok for this task
            self._tqm.send_callback('v2_runner_on_ok', task_result)

        self._pending_results -= 1
        if original_host.name in self._blocked_hosts:
            del self._blocked_hosts[original_host.name]

        # If this is a role task, mark the parent role as being run (if
        # the task was ok or failed, but not skipped or unreachable)
        if original_task._role is not None and role_ran:  # TODO: and original_task.action != 'include_role':?
            # lookup the role in the ROLE_CACHE to make sure we're dealing
            # with the correct object and mark it as executed
            for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                if role_obj._uuid == original_task._role._uuid:
                    role_obj._had_task_run[original_host.name] = True

        ret_results.append(task_result)

        if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
            break

        cur_pass += 1

    return ret_results
def _wait_on_handler_results(self, iterator, handler, notified_hosts):
    '''
    Wait for the handler tasks to complete, using a short sleep
    between checks to ensure we don't spin lock
    '''
    collected = []
    finished_count = 0
    display.debug("waiting for handler results...")
    while (self._pending_results > 0 and
           finished_count < len(notified_hosts) and
           not self._tqm._terminated):
        if self._tqm.has_dead_workers():
            raise AnsibleError("A worker was found in a dead state")
        batch = self._process_pending_results(iterator)
        collected.extend(batch)
        # Only count results that belong to this handler on a notified host.
        finished_count += len([
            r._host for r in batch
            if r._host in notified_hosts and r.task_name == handler.name])
        if self._pending_results > 0:
            time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
    display.debug("no more pending handlers, returning what we have")
    return collected
def _wait_on_pending_results(self, iterator):
    '''
    Wait for the shared counter to drop to zero, using a short sleep
    between checks to ensure we don't spin lock
    '''
    collected = []
    display.debug("waiting for pending results...")
    while self._pending_results > 0 and not self._tqm._terminated:
        if self._tqm.has_dead_workers():
            raise AnsibleError("A worker was found in a dead state")
        collected.extend(self._process_pending_results(iterator))
        if self._pending_results > 0:
            time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
    display.debug("no more pending results, returning what we have")
    return collected
def _add_host(self, host_info, iterator):
    '''
    Helper function to add a new host to inventory based on a task result.
    '''
    if not host_info:
        return
    host_name = host_info.get('host_name')

    # Check if host in inventory, add if not
    if host_name not in self._inventory.hosts:
        self._inventory.add_host(host_name, 'all')
    new_host = self._inventory.hosts.get(host_name)

    # Set/update the vars for this host
    new_host.vars = combine_vars(new_host.get_vars(), host_info.get('host_vars', dict()))

    # Create any missing groups and put the host into each of them.
    for group_name in host_info.get('groups', []):
        if group_name not in self._inventory.groups:
            self._inventory.add_group(group_name)
        self._inventory.groups[group_name].add_host(self._inventory.hosts[host_name])

    # reconcile inventory, ensures inventory rules are followed
    self._inventory.reconcile_inventory()
def _add_group(self, host, result_item):
    '''
    Helper function to add a group (if it does not exist), and to assign the
    specified host to that group.

    :arg host: executor-side copy of the host the group_by task ran on.
    :arg result_item: task result dict carrying 'add_group' and optional
        'parent_groups' keys.
    :returns: True if inventory was modified (and reconciled), else False.
    '''
    changed = False
    # the host here is from the executor side, which means it was a
    # serialized/cloned copy and we'll need to look up the proper
    # host object from the master inventory
    real_host = self._inventory.hosts.get(host.name)
    if real_host is None:
        if host.name == self._inventory.localhost.name:
            real_host = self._inventory.localhost
        else:
            raise AnsibleError('%s cannot be matched in inventory' % host.name)
    group_name = result_item.get('add_group')
    parent_group_names = result_item.get('parent_groups', [])
    # Create the target group plus any requested parent groups that do
    # not exist yet.
    for name in [group_name] + parent_group_names:
        if name not in self._inventory.groups:
            # create the new group and add it to inventory
            self._inventory.add_group(name)
            changed = True
    group = self._inventory.groups[group_name]
    for parent_group_name in parent_group_names:
        parent_group = self._inventory.groups[parent_group_name]
        parent_group.add_child_group(group)
    # NOTE(review): group.get_hosts() presumably returns Host objects, so
    # testing a name string against it may always be True -- confirm
    # against the inventory Group API before relying on 'changed' here.
    if real_host.name not in group.get_hosts():
        group.add_host(real_host)
        changed = True
    # NOTE(review): host.get_groups() likewise appears to return Group
    # objects rather than names -- verify this membership test.
    if group_name not in host.get_groups():
        real_host.add_group(group)
        changed = True
    if changed:
        self._inventory.reconcile_inventory()
    return changed
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars.copy()
temp_vars.update(included_file._args)
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
    '''
    Loads an included YAML file of tasks, applying the optional set of variables.

    On any AnsibleError during loading, every host that included the file
    is marked failed (with callbacks and stats) and an empty list is
    returned instead of raising.
    '''
    display.debug("loading included file: %s" % included_file._filename)
    try:
        data = self._loader.load_from_file(included_file._filename)
        if data is None:
            return []
        elif not isinstance(data, list):
            raise AnsibleError("included task files must contain a list of tasks")
        ti_copy = self._copy_included_file(included_file)
        # pop tags out of the include args, if they were specified there, and assign
        # them to the include. If the include already had tags specified, we raise an
        # error so that users know not to specify them both ways
        tags = included_file._task.vars.pop('tags', [])
        if isinstance(tags, string_types):
            tags = tags.split(',')
        if len(tags) > 0:
            if len(included_file._task.tags) > 0:
                raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                         "Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
                                         obj=included_file._task._ds)
            display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option",
                               version='2.12')
            included_file._task.tags = tags
        block_list = load_list_of_blocks(
            data,
            play=iterator._play,
            parent_block=ti_copy.build_parent_block(),
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader,
            variable_manager=self._variable_manager,
        )
        # since we skip incrementing the stats when the task result is
        # first processed, we do so now for each host in the list
        for host in included_file._hosts:
            self._tqm._stats.increment('ok', host.name)
    except AnsibleError as e:
        if isinstance(e, AnsibleFileNotFound):
            reason = "Could not find or access '%s' on the Ansible Controller." % to_text(included_file._filename)
        else:
            reason = to_text(e)
        # mark all of the hosts including this file as failed, send callbacks,
        # and increment the stats for this host
        for host in included_file._hosts:
            tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
            iterator.mark_host_failed(host)
            self._tqm._failed_hosts[host.name] = True
            self._tqm._stats.increment('failures', host.name)
            self._tqm.send_callback('v2_runner_on_failed', tr)
        return []
    # finally, send the callback and return the list of blocks loaded
    self._tqm.send_callback('v2_playbook_on_include', included_file)
    display.debug("done processing included file")
    return block_list
def run_handlers(self, iterator, play_context):
    '''
    Runs handlers on those hosts which have been notified.

    :returns: the result of the last handler run (self._tqm.RUN_OK when
        no handler had notified hosts).
    '''
    result = self._tqm.RUN_OK

    for handler_block in iterator._play.handlers:
        # FIXME: handlers need to support the rescue/always portions of blocks too,
        # but this may take some work in the iterator and gets tricky when
        # we consider the ability of meta tasks to flush handlers
        for handler in handler_block.block:
            if handler.notified_hosts:
                # NOTE: handler vars were previously computed here via
                # get_vars() but never used; _do_handler_run computes the
                # vars it needs per host, so the dead lookup was removed.
                handler_name = handler.get_name()
                result = self._do_handler_run(handler, handler_name, iterator=iterator, play_context=play_context)
                if not result:
                    # stop running handlers in this block on failure
                    break
    return result
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
    '''
    Queue one handler on its notified hosts, wait for the results, and
    recursively run any handlers brought in by includes in those results.

    :returns: False if processing include results raised an AnsibleError
        or a recursive handler run failed, True otherwise.
    '''
    # FIXME: need to use iterator.get_failed_hosts() instead?
    # if not len(self.get_hosts_remaining(iterator._play)):
    #     self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
    #     result = False
    #     break
    if notified_hosts is None:
        notified_hosts = handler.notified_hosts[:]

    notified_hosts = self._filter_notified_hosts(notified_hosts)

    if len(notified_hosts) > 0:
        # Temporarily swap in the (possibly templated) display name for
        # the task-start callback.
        saved_name = handler.name
        handler.name = handler_name
        self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
        handler.name = saved_name

    run_once = False
    try:
        action = action_loader.get(handler.action, class_only=True)
        if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
            run_once = True
    except KeyError:
        # we don't care here, because the action may simply not have a
        # corresponding action plugin
        pass

    host_results = []
    for host in notified_hosts:
        if not iterator.is_failed(host) or iterator._play.force_handlers:
            task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler)
            self.add_tqm_variables(task_vars, play=iterator._play)
            self._queue_task(host, handler, task_vars, play_context)
            if run_once:
                break

    # collect the results from the handler run
    host_results = self._wait_on_handler_results(iterator, handler, notified_hosts)

    try:
        included_files = IncludedFile.process_include_results(
            host_results,
            iterator=iterator,
            loader=self._loader,
            variable_manager=self._variable_manager
        )
    except AnsibleError:
        return False

    result = True
    if len(included_files) > 0:
        for included_file in included_files:
            try:
                new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
                # for every task in each block brought in by the include, add the list
                # of hosts which included the file to the notified_handlers dict
                for block in new_blocks:
                    iterator._play.handlers.append(block)
                    for task in block.block:
                        task_name = task.get_name()
                        display.debug("adding task '%s' included in handler '%s'" % (task_name, handler_name))
                        task.notified_hosts = included_file._hosts[:]
                        # Recursively run each included handler task on the
                        # same set of notified hosts.
                        result = self._do_handler_run(
                            handler=task,
                            handler_name=task_name,
                            iterator=iterator,
                            play_context=play_context,
                            notified_hosts=included_file._hosts[:],
                        )
                        if not result:
                            break
            except AnsibleError as e:
                # An error in one included file fails its hosts but does
                # not abort the remaining included files.
                for host in included_file._hosts:
                    iterator.mark_host_failed(host)
                    self._tqm._failed_hosts[host.name] = True
                display.warning(to_text(e))
                continue

    # remove hosts from notification list
    handler.notified_hosts = [
        h for h in handler.notified_hosts
        if h not in notified_hosts]
    display.debug("done running handlers, result is: %s" % result)
    return result
def _filter_notified_hosts(self, notified_hosts):
'''
Filter notified hosts accordingly to strategy
'''
# As main strategy is linear, we do not filter hosts
# We return a copy to avoid race conditions
return notified_hosts[:]
def _take_step(self, task, host=None):
    """
    Prompt the user in step mode and return True when the task should run.

    'y'/'yes' runs the task; 'c'/'continue' runs it and disables step
    mode for the rest of the run; anything else skips it.
    """
    prompt_msg = u'Perform task: %s ' % task
    if host:
        prompt_msg += u'on %s ' % host
    prompt_msg += u'(N)o/(y)es/(c)ontinue: '

    answer = display.prompt(prompt_msg).lower()
    if answer in ('y', 'yes'):
        display.debug("User ran task")
        run_task = True
    elif answer in ('c', 'continue'):
        display.debug("User ran task and canceled step mode")
        self._step = False
        run_task = True
    else:
        display.debug("User skipped task")
        run_task = False

    display.banner(prompt_msg)
    return run_task
def _cond_not_supported_warn(self, task_name):
    """Warn that the given meta task ignores its `when` conditional."""
    warning = "%s task does not support when conditional" % task_name
    display.warning(warning)
def _execute_meta(self, task, play_context, iterator, target_host):
    '''
    Execute a meta task (noop, flush_handlers, refresh_inventory,
    clear_facts, clear_host_errors, end_play, end_host, reset_connection)
    and return a single-element list with its TaskResult.
    '''
    # meta tasks store their args in the _raw_params field of args,
    # since they do not use k=v pairs, so get that
    meta_action = task.args.get('_raw_params')

    def _evaluate_conditional(h):
        # Evaluate the task's `when` against the vars of host h.
        all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
        templar = Templar(loader=self._loader, variables=all_vars)
        return task.evaluate_conditional(templar, all_vars)

    skipped = False
    msg = ''
    if meta_action == 'noop':
        # FIXME: issue a callback for the noop here?
        if task.when:
            self._cond_not_supported_warn(meta_action)
        msg = "noop"
    elif meta_action == 'flush_handlers':
        if task.when:
            self._cond_not_supported_warn(meta_action)
        self._flushed_hosts[target_host] = True
        self.run_handlers(iterator, play_context)
        self._flushed_hosts[target_host] = False
        msg = "ran handlers"
    # NOTE(review): because of the `or`, ANY meta action triggers an
    # inventory refresh when flush_cache is set -- confirm this is the
    # intended precedence.
    elif meta_action == 'refresh_inventory' or self.flush_cache:
        if task.when:
            self._cond_not_supported_warn(meta_action)
        self._inventory.refresh_inventory()
        msg = "inventory successfully refreshed"
    elif meta_action == 'clear_facts':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                hostname = host.get_name()
                self._variable_manager.clear_facts(hostname)
            msg = "facts cleared"
        else:
            skipped = True
    elif meta_action == 'clear_host_errors':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                self._tqm._failed_hosts.pop(host.name, False)
                self._tqm._unreachable_hosts.pop(host.name, False)
                iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
            msg = "cleared host errors"
        else:
            skipped = True
    elif meta_action == 'end_play':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                if host.name not in self._tqm._unreachable_hosts:
                    iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
            msg = "ending play"
    elif meta_action == 'end_host':
        if _evaluate_conditional(target_host):
            iterator._host_states[target_host.name].run_state = iterator.ITERATING_COMPLETE
            msg = "ending play for %s" % target_host.name
        else:
            skipped = True
            msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
    elif meta_action == 'reset_connection':
        all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task)
        templar = Templar(loader=self._loader, variables=all_vars)

        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        play_context.post_validate(templar=templar)

        # now that the play context is finalized, if the remote_addr is not set
        # default to using the host's address field as the remote address
        if not play_context.remote_addr:
            play_context.remote_addr = target_host.address

        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        play_context.update_vars(all_vars)

        if task.when:
            self._cond_not_supported_warn(meta_action)

        # Reuse the cached persistent connection for this host if one
        # exists, otherwise instantiate the connection plugin directly.
        if target_host in self._active_connections:
            connection = Connection(self._active_connections[target_host])
            del self._active_connections[target_host]
        else:
            connection = connection_loader.get(play_context.connection, play_context, os.devnull)
            play_context.set_attributes_from_plugin(connection)

        if connection:
            try:
                connection.reset()
                msg = 'reset connection'
            except ConnectionError as e:
                # most likely socket is already closed
                display.debug("got an error while closing persistent connection: %s" % e)
        else:
            msg = 'no connection, nothing to reset'
    else:
        raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

    result = {'msg': msg}
    if skipped:
        result['skipped'] = True
    else:
        result['changed'] = False

    display.vv("META: %s" % msg)

    return [TaskResult(target_host, task, result)]
def get_hosts_left(self, iterator):
    ''' returns list of available hosts for this iterator by filtering out unreachables '''
    unreachable = self._tqm._unreachable_hosts
    candidates = self._inventory.get_hosts(iterator._play.hosts, order=iterator._play.order)
    return [host for host in candidates if host.name not in unreachable]
def update_active_connections(self, results):
    ''' updates the current active persistent connections '''
    for result in results:
        fields = result._task_fields
        if 'args' not in fields:
            continue
        socket_path = fields['args'].get('_ansible_socket')
        # Record the socket only for hosts we have not seen yet.
        if socket_path and result._host not in self._active_connections:
            self._active_connections[result._host] = socket_path
class NextAction(object):
    """ The next action after an interpreter's exit. """

    # Possible outcomes chosen by the debugger session:
    REDO = 1      # schedule the task for re-execution (see Debugger.do_redo)
    CONTINUE = 2  # continue to the next result
    EXIT = 3      # stop execution

    def __init__(self, result=EXIT):
        # Defaults to EXIT until the debugger chooses otherwise.
        self.result = result
class Debugger(cmd.Cmd):
    """
    Interactive task debugger built on the standard library's ``cmd.Cmd``
    loop.  It exposes the failed task, its variables, the host, the play
    context and the result for inspection/modification, and records the
    user's decision (quit/continue/redo) in ``next_action``.
    """
    prompt_continuous = '> '  # multiple lines

    def __init__(self, task, host, task_vars, play_context, result, next_action):
        # cmd.Cmd is old-style class
        cmd.Cmd.__init__(self)

        self.prompt = '[%s] %s (debug)> ' % (host, task)
        self.intro = None
        # scope is the namespace handed to eval/exec for user commands
        self.scope = {}
        self.scope['task'] = task
        self.scope['task_vars'] = task_vars
        self.scope['host'] = host
        self.scope['play_context'] = play_context
        self.scope['result'] = result
        self.next_action = next_action

    def cmdloop(self):
        # Ctrl-C just ends the debugger loop instead of propagating.
        try:
            cmd.Cmd.cmdloop(self)
        except KeyboardInterrupt:
            pass

    do_h = cmd.Cmd.do_help

    def do_EOF(self, args):
        """Quit"""
        return self.do_quit(args)

    def do_quit(self, args):
        """Quit"""
        display.display('User interrupted execution')
        self.next_action.result = NextAction.EXIT
        # returning True tells cmd.Cmd to stop the loop
        return True

    do_q = do_quit

    def do_continue(self, args):
        """Continue to next result"""
        self.next_action.result = NextAction.CONTINUE
        return True

    do_c = do_continue

    def do_redo(self, args):
        """Schedule task for re-execution. The re-execution may not be the next result"""
        self.next_action.result = NextAction.REDO
        return True

    do_r = do_redo

    def do_update_task(self, args):
        """Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
        templar = Templar(None, shared_loader_obj=None, variables=self.scope['task_vars'])
        task = self.scope['task']
        task = task.load_data(task._ds)
        task.post_validate(templar)
        self.scope['task'] = task

    do_u = do_update_task

    def evaluate(self, args):
        # Evaluate an expression in the debug scope; the exception is
        # displayed and then re-raised for the caller to handle.
        try:
            return eval(args, globals(), self.scope)
        except Exception:
            t, v = sys.exc_info()[:2]
            if isinstance(t, str):
                exc_type_name = t
            else:
                exc_type_name = t.__name__
            display.display('***%s:%s' % (exc_type_name, repr(v)))
            raise

    def do_pprint(self, args):
        """Pretty Print"""
        try:
            result = self.evaluate(args)
            display.display(pprint.pformat(result))
        except Exception:
            # evaluate() already displayed the error
            pass

    do_p = do_pprint

    def execute(self, args):
        # Compile and exec a statement in the debug scope; the exception
        # is displayed and then re-raised for the caller to handle.
        try:
            code = compile(args + '\n', '<stdin>', 'single')
            exec(code, globals(), self.scope)
        except Exception:
            t, v = sys.exc_info()[:2]
            if isinstance(t, str):
                exc_type_name = t
            else:
                exc_type_name = t.__name__
            display.display('***%s:%s' % (exc_type_name, repr(v)))
            raise

    def default(self, line):
        # Any unrecognized command is executed as Python in the scope.
        try:
            self.execute(line)
        except Exception:
            # execute() already displayed the error
            pass
| gpl-3.0 |
Senseg/Py4A | python-modules/twisted/twisted/web/test/test_domhelpers.py | 53 | 11063 | # -*- test-case-name: twisted.web.test.test_domhelpers -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Specific tests for (some of) the methods in L{twisted.web.domhelpers}.
"""
from xml.dom import minidom
from twisted.trial.unittest import TestCase
from twisted.web import microdom
from twisted.web import domhelpers
class DOMHelpersTestsMixin:
"""
A mixin for L{TestCase} subclasses which defines test methods for
domhelpers functionality based on a DOM creation function provided by a
subclass.
"""
dom = None
def test_getElementsByTagName(self):
    """
    L{domhelpers.getElementsByTagName} returns all descendant elements
    with the requested tag name, in document (depth-first) order, whether
    the search starts at the document node or at an element.
    """
    # Note: the deprecated assertEquals alias used in part of this test
    # has been normalized to assertEqual throughout.
    doc1 = self.dom.parseString('<foo/>')
    actual = domhelpers.getElementsByTagName(doc1, 'foo')[0].nodeName
    expected = 'foo'
    self.assertEqual(actual, expected)
    el1 = doc1.documentElement
    actual = domhelpers.getElementsByTagName(el1, 'foo')[0].nodeName
    self.assertEqual(actual, expected)

    # Matches at several depths come back in document order.
    doc2_xml = '<a><foo in="a"/><b><foo in="b"/></b><c><foo in="c"/></c><foo in="d"/><foo in="ef"/><g><foo in="g"/><h><foo in="h"/></h></g></a>'
    doc2 = self.dom.parseString(doc2_xml)
    tag_list = domhelpers.getElementsByTagName(doc2, 'foo')
    actual = ''.join([node.getAttribute('in') for node in tag_list])
    expected = 'abcdefgh'
    self.assertEqual(actual, expected)
    el2 = doc2.documentElement
    tag_list = domhelpers.getElementsByTagName(el2, 'foo')
    actual = ''.join([node.getAttribute('in') for node in tag_list])
    self.assertEqual(actual, expected)

    # Depth-first order also holds for a deeply nested document.
    doc3_xml = '''
<a><foo in="a"/>
    <b><foo in="b"/>
        <d><foo in="d"/>
            <g><foo in="g"/></g>
            <h><foo in="h"/></h>
        </d>
        <e><foo in="e"/>
            <i><foo in="i"/></i>
        </e>
    </b>
    <c><foo in="c"/>
        <f><foo in="f"/>
            <j><foo in="j"/></j>
        </f>
    </c>
</a>'''
    doc3 = self.dom.parseString(doc3_xml)
    tag_list = domhelpers.getElementsByTagName(doc3, 'foo')
    actual = ''.join([node.getAttribute('in') for node in tag_list])
    expected = 'abdgheicfj'
    self.assertEqual(actual, expected)
    el3 = doc3.documentElement
    tag_list = domhelpers.getElementsByTagName(el3, 'foo')
    actual = ''.join([node.getAttribute('in') for node in tag_list])
    self.assertEqual(actual, expected)

    # The root element itself can match, alongside a nested match.
    doc4_xml = '<foo><bar></bar><baz><foo/></baz></foo>'
    doc4 = self.dom.parseString(doc4_xml)
    actual = domhelpers.getElementsByTagName(doc4, 'foo')
    root = doc4.documentElement
    expected = [root, root.childNodes[-1].childNodes[0]]
    self.assertEqual(actual, expected)
    actual = domhelpers.getElementsByTagName(root, 'foo')
    self.assertEqual(actual, expected)
def test_gatherTextNodes(self):
    """
    L{domhelpers.gatherTextNodes} concatenates the text of every
    descendant text node in document order, starting from either the
    document node or an element.
    """
    single = self.dom.parseString('<a>foo</a>')
    self.assertEqual(domhelpers.gatherTextNodes(single), 'foo')
    self.assertEqual(domhelpers.gatherTextNodes(single.documentElement), 'foo')

    flat = self.dom.parseString('<a>a<b>b</b><c>c</c>def<g>g<h>h</h></g></a>')
    self.assertEqual(domhelpers.gatherTextNodes(flat), 'abcdefgh')
    self.assertEqual(domhelpers.gatherTextNodes(flat.documentElement), 'abcdefgh')

    nested_xml = ('<a>a<b>b<d>d<g>g</g><h>h</h></d><e>e<i>i</i></e></b>' +
                  '<c>c<f>f<j>j</j></f></c></a>')
    nested = self.dom.parseString(nested_xml)
    self.assertEqual(domhelpers.gatherTextNodes(nested), 'abdgheicfj')
    self.assertEqual(domhelpers.gatherTextNodes(nested.documentElement), 'abdgheicfj')
def test_clearNode(self):
    """
    L{domhelpers.clearNode} removes all children of the given node but
    leaves the node itself in the tree.
    """
    # Clearing the root element leaves an empty <a/>.
    doc = self.dom.parseString('<a><b><c><d/></c></b></a>')
    root = doc.documentElement
    domhelpers.clearNode(root)
    self.assertEqual(root.toxml(), self.dom.Element('a').toxml())

    # Clearing an interior node keeps the node as an empty child.
    doc = self.dom.parseString('<a><b><c><d/></c></b></a>')
    inner = doc.documentElement.childNodes[0]
    domhelpers.clearNode(inner)
    emptied = self.dom.Element('a')
    emptied.appendChild(self.dom.Element('b'))
    self.assertEqual(doc.documentElement.toxml(), emptied.toxml())
    def test_get(self):
        """
        L{get} looks a node up by its 'class' or 'id' attribute value and
        raises L{NodeLookupError} when nothing matches.
        """
        doc1 = self.dom.parseString('<a><b id="bar"/><c class="foo"/></a>')
        node=domhelpers.get(doc1, "foo")
        actual=node.toxml()
        expected = self.dom.Element('c')
        expected.setAttribute('class', 'foo')
        self.assertEqual(actual, expected.toxml())
        node=domhelpers.get(doc1, "bar")
        actual=node.toxml()
        expected = self.dom.Element('b')
        expected.setAttribute('id', 'bar')
        self.assertEqual(actual, expected.toxml())
        # No node has id/class "pzork".
        self.assertRaises(domhelpers.NodeLookupError,
                          domhelpers.get,
                          doc1,
                          "pzork")
    def test_getIfExists(self):
        """
        L{getIfExists} behaves like L{get} but returns C{None} instead of
        raising when no node matches.
        """
        doc1 = self.dom.parseString('<a><b id="bar"/><c class="foo"/></a>')
        node=domhelpers.getIfExists(doc1, "foo")
        actual=node.toxml()
        expected = self.dom.Element('c')
        expected.setAttribute('class', 'foo')
        self.assertEqual(actual, expected.toxml())
        node=domhelpers.getIfExists(doc1, "pzork")
        self.assertIdentical(node, None)
    def test_getAndClear(self):
        """
        L{getAndClear} returns the matching node with its children removed.
        """
        doc1 = self.dom.parseString('<a><b id="foo"><c></c></b></a>')
        node=domhelpers.getAndClear(doc1, "foo")
        actual=node.toxml()
        # The <c/> child has been stripped from the returned <b> element.
        expected = self.dom.Element('b')
        expected.setAttribute('id', 'foo')
        self.assertEqual(actual, expected.toxml())
    def test_locateNodes(self):
        """
        L{locateNodes} finds nodes carrying a given attribute value; with
        C{noNesting} true, matches nested inside an already-matched node
        are not reported separately.
        """
        doc1 = self.dom.parseString('<a><b foo="olive"><c foo="olive"/></b><d foo="poopy"/></a>')
        node_list=domhelpers.locateNodes(
            doc1.childNodes, 'foo', 'olive', noNesting=1)
        actual=''.join([node.toxml() for node in node_list])
        expected = self.dom.Element('b')
        expected.setAttribute('foo', 'olive')
        c = self.dom.Element('c')
        c.setAttribute('foo', 'olive')
        expected.appendChild(c)
        self.assertEqual(actual, expected.toxml())
        # Without noNesting, the nested <c> match is reported as well.
        node_list=domhelpers.locateNodes(
            doc1.childNodes, 'foo', 'olive', noNesting=0)
        actual=''.join([node.toxml() for node in node_list])
        self.assertEqual(actual, expected.toxml() + c.toxml())
    def test_getParents(self):
        """
        L{getParents} returns the chain of ancestor nodes, nearest first.
        """
        doc1 = self.dom.parseString('<a><b><c><d/></c><e/></b><f/></a>')
        node_list = domhelpers.getParents(
            doc1.childNodes[0].childNodes[0].childNodes[0])
        # Only element ancestors have a tagName; document nodes are skipped.
        actual = ''.join([node.tagName for node in node_list
                          if hasattr(node, 'tagName')])
        self.assertEqual(actual, 'cba')
    def test_findElementsWithAttribute(self):
        """
        L{findElementsWithAttribute} finds elements carrying an attribute,
        optionally restricted to a particular attribute value.
        """
        doc1 = self.dom.parseString('<a foo="1"><b foo="2"/><c foo="1"/><d/></a>')
        node_list = domhelpers.findElementsWithAttribute(doc1, 'foo')
        actual = ''.join([node.tagName for node in node_list])
        self.assertEqual(actual, 'abc')
        # Restricting by value drops <b foo="2"/>.
        node_list = domhelpers.findElementsWithAttribute(doc1, 'foo', '1')
        actual = ''.join([node.tagName for node in node_list])
        self.assertEqual(actual, 'ac')
    def test_findNodesNamed(self):
        """
        L{findNodesNamed} returns every node with the given name, at any
        depth.
        """
        doc1 = self.dom.parseString('<doc><foo/><bar/><foo>a</foo></doc>')
        node_list = domhelpers.findNodesNamed(doc1, 'foo')
        actual = len(node_list)
        self.assertEqual(actual, 2)

    # NOT SURE WHAT THESE ARE SUPPOSED TO DO..
    # def test_RawText FIXME
    # def test_superSetAttribute FIXME
    # def test_superPrependAttribute FIXME
    # def test_superAppendAttribute FIXME
    # def test_substitute FIXME
    def test_escape(self):
        """
        L{escape} leaves this input unchanged.
        """
        # NOTE(review): input and expected are identical here; the original
        # entity-encoded expected value appears to have been mangled in this
        # copy of the file -- verify against upstream before relying on it.
        j='this string " contains many & characters> xml< won\'t like'
        expected='this string " contains many & characters> xml< won\'t like'
        self.assertEqual(domhelpers.escape(j), expected)
    def test_unescape(self):
        """
        L{unescape} leaves this input unchanged.
        """
        # NOTE(review): input and expected are identical here; the original
        # entity-encoded input appears to have been mangled in this copy of
        # the file -- verify against upstream before relying on it.
        j='this string " has && entities > < and some characters xml won\'t like<'
        expected='this string " has && entities > < and some characters xml won\'t like<'
        self.assertEqual(domhelpers.unescape(j), expected)
    def test_getNodeText(self):
        """
        L{getNodeText} returns the concatenation of all the text data at or
        beneath the node passed to it.
        """
        # Text from sibling subtrees is joined in document order.
        node = self.dom.parseString('<foo><bar>baz</bar><bar>quux</bar></foo>')
        self.assertEqual(domhelpers.getNodeText(node), "bazquux")
class MicroDOMHelpersTests(DOMHelpersTestsMixin, TestCase):
    """
    Run the shared DOM helper tests against Twisted's microdom
    implementation, plus microdom-specific behaviors.
    """
    dom = microdom

    def test_gatherTextNodesDropsWhitespace(self):
        """
        Microdom discards whitespace-only text nodes, so L{gatherTextNodes}
        returns only the text from nodes which had non-whitespace characters.
        """
        # NOTE(review): the whitespace inside these literals looks collapsed
        # in this copy of the file; the expected value must match the
        # markup's indentation exactly -- verify against upstream.
        doc4_xml='''<html>
<head>
</head>
<body>
stuff
</body>
</html>
'''
        doc4 = self.dom.parseString(doc4_xml)
        actual = domhelpers.gatherTextNodes(doc4)
        expected = '\n stuff\n '
        self.assertEqual(actual, expected)
        actual = domhelpers.gatherTextNodes(doc4.documentElement)
        self.assertEqual(actual, expected)

    def test_textEntitiesNotDecoded(self):
        """
        Microdom does not decode entities in text nodes.
        """
        # NOTE(review): input and expected are identical in this copy; the
        # original entity-encoded markup appears mangled -- verify upstream.
        doc5_xml='<x>Souffl&</x>'
        doc5 = self.dom.parseString(doc5_xml)
        actual=domhelpers.gatherTextNodes(doc5)
        expected='Souffl&'
        self.assertEqual(actual, expected)
        actual=domhelpers.gatherTextNodes(doc5.documentElement)
        self.assertEqual(actual, expected)
class MiniDOMHelpersTests(DOMHelpersTestsMixin, TestCase):
    """
    Run the shared DOM helper tests against the stdlib xml.dom.minidom
    implementation, plus minidom-specific behaviors.
    """
    dom = minidom

    def test_textEntitiesDecoded(self):
        """
        Minidom does decode entities in text nodes.
        """
        # NOTE(review): input and expected are identical in this copy; the
        # original entity-encoded input appears mangled -- verify upstream.
        doc5_xml='<x>Souffl&</x>'
        doc5 = self.dom.parseString(doc5_xml)
        actual=domhelpers.gatherTextNodes(doc5)
        expected='Souffl&'
        self.assertEqual(actual, expected)
        actual=domhelpers.gatherTextNodes(doc5.documentElement)
        self.assertEqual(actual, expected)

    def test_getNodeUnicodeText(self):
        """
        L{domhelpers.getNodeText} returns a C{unicode} string when text
        nodes are represented in the DOM with unicode, whether or not there
        are non-ASCII characters present.
        """
        # ``unicode`` is the Python 2 builtin; this module targets Python 2.
        node = self.dom.parseString("<foo>bar</foo>")
        text = domhelpers.getNodeText(node)
        self.assertEqual(text, u"bar")
        self.assertIsInstance(text, unicode)

        node = self.dom.parseString(u"<foo>\N{SNOWMAN}</foo>".encode('utf-8'))
        text = domhelpers.getNodeText(node)
        self.assertEqual(text, u"\N{SNOWMAN}")
        self.assertIsInstance(text, unicode)
| apache-2.0 |
sigrokproject/libsigrokdecode | decoders/usb_request/pd.py | 3 | 14610 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Stefan Brüns <stefan.bruens@rwth-aachen.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
import struct
class SamplerateError(Exception):
    """Raised when decode() is invoked without a samplerate having been set."""
    pass
class pcap_usb_pkt():
    """One pcap record in the Linux usbmon mmapped capture format.

    Wraps a decoder 'request' dict plus a timestamp into the 64-byte usbmon
    header (see Documentation/usb/usbmon.txt) followed by the payload bytes.
    """
    # Template header with all fields zeroed/defaulted; each instance copies
    # this into a mutable bytearray and patches the relevant offsets.
    h = b'\x00\x00\x00\x00' # ID part 1
    h += b'\x00\x00\x00\x00' # ID part 2
    h += b'C' # 'S'ubmit / 'C'omplete / 'E'rror
    h += b'\x03' # ISO (0), Intr, Control, Bulk (3)
    h += b'\x00' # Endpoint
    h += b'\x00' # Device address
    h += b'\x00\x00' # Bus number
    h += b'-' # Setup tag - 0: Setup present, '-' otherwise
    h += b'<' # Data tag - '<' no data, 0 otherwise
    # Timestamp
    h += b'\x00\x00\x00\x00' # TS seconds part 1
    h += b'\x00\x00\x00\x00' # TS seconds part 2
    h += b'\x00\x00\x00\x00' # TS useconds
    #
    h += b'\x00\x00\x00\x00' # Status 0: OK
    h += b'\x00\x00\x00\x00' # URB length
    h += b'\x00\x00\x00\x00' # Data length
    # Setup packet data, valid if setup tag == 0
    h += b'\x00' # bmRequestType
    h += b'\x00' # bRequest
    h += b'\x00\x00' # wValue
    h += b'\x00\x00' # wIndex
    h += b'\x00\x00' # wLength
    #
    h += b'\x00\x00\x00\x00' # ISO/interrupt interval
    h += b'\x00\x00\x00\x00' # ISO start frame
    h += b'\x00\x00\x00\x00' # URB flags
    h += b'\x00\x00\x00\x00' # Number of ISO descriptors

    def __init__(self, req, ts, is_submit):
        """Build a record for request dict *req* at timestamp *ts* (a
        (seconds, microseconds) tuple); *is_submit* selects the usbmon
        'S'ubmit vs. 'C'omplete event type.
        """
        self.header = bytearray(pcap_usb_pkt.h)
        self.data = b''
        self.set_urbid(req['id'])
        self.set_urbtype('S' if is_submit else 'C')
        self.set_timestamp(ts)
        self.set_addr_ep(req['addr'], req['ep'])
        if req['type'] in ('SETUP IN', 'SETUP OUT'):
            self.set_transfertype(2) # Control
            self.set_setup(req['setup_data'])
        # BUG FIX: the original tested "in ('BULK IN')", which parenthesizes
        # a bare string and therefore performs a *substring* test rather
        # than tuple membership. Use a real one-element tuple.
        if req['type'] in ('BULK IN',):
            # IN endpoints carry the direction bit (0x80) in usbmon captures.
            self.set_addr_ep(req['addr'], 0x80 | req['ep'])
        self.set_data(req['data'])

    def set_urbid(self, urbid):
        # Low half of the 64-bit URB ID.
        self.header[4:8] = struct.pack('>I', urbid)

    def set_urbtype(self, urbtype):
        # 'S'ubmit / 'C'omplete / 'E'rror.
        self.header[8] = ord(urbtype)

    def set_transfertype(self, transfertype):
        # 0: ISO, 1: Interrupt, 2: Control, 3: Bulk.
        self.header[9] = transfertype

    def set_addr_ep(self, addr, ep):
        self.header[11] = addr
        self.header[10] = ep

    def set_timestamp(self, ts):
        # Kept around so record_header() can reuse it.
        self.timestamp = ts
        self.header[20:24] = struct.pack('>I', ts[0]) # seconds
        self.header[24:28] = struct.pack('>I', ts[1]) # microseconds

    def set_data(self, data):
        self.data = data
        self.header[15] = 0  # Data tag: data present.
        self.header[36:40] = struct.pack('>I', len(data))

    def set_setup(self, data):
        self.header[14] = 0  # Setup tag: setup packet present.
        self.header[40:48] = data

    def packet(self):
        """Return the complete record: usbmon header plus payload."""
        return bytes(self.header) + bytes(self.data)

    def record_header(self):
        # See https://wiki.wireshark.org/Development/LibpcapFileFormat.
        (secs, usecs) = self.timestamp
        h = struct.pack('>I', secs) # TS seconds
        h += struct.pack('>I', usecs) # TS microseconds
        # No truncation, so both lengths are the same.
        h += struct.pack('>I', len(self)) # Captured len (usb hdr + data)
        h += struct.pack('>I', len(self)) # Original len
        return h

    def __len__(self):
        # 64-byte usbmon header plus payload.
        return 64 + len(self.data)
class Decoder(srd.Decoder):
    """Stacked decoder: assembles usb_packet output into USB
    transactions/requests, annotates them, and can export them in the
    Linux usbmon pcap binary format.
    """
    api_version = 3
    id = 'usb_request'
    name = 'USB request'
    longname = 'Universal Serial Bus (LS/FS) transaction/request'
    desc = 'USB (low-speed/full-speed) transaction/request protocol.'
    license = 'gplv2+'
    inputs = ['usb_packet']
    outputs = ['usb_request']
    options = (
        {'id': 'in_request_start', 'desc': 'Start IN requests on',
            'default': 'submit', 'values': ('submit', 'first-ack')},
    )
    tags = ['PC']
    annotations = (
        ('request-setup-read', 'Setup: Device-to-host'),
        ('request-setup-write', 'Setup: Host-to-device'),
        ('request-bulk-read', 'Bulk: Device-to-host'),
        ('request-bulk-write', 'Bulk: Host-to-device'),
        ('error', 'Unexpected packet'),
    )
    annotation_rows = (
        ('request-setup', 'USB SETUP', (0, 1)),
        ('request-in', 'USB BULK IN', (2,)),
        ('request-out', 'USB BULK OUT', (3,)),
        ('errors', 'Errors', (4,)),
    )
    binary = (
        ('pcap', 'PCAP format'),
    )
    def __init__(self):
        # All mutable state lives in reset() so a rerun starts from scratch.
        self.reset()
    def reset(self):
        """Reset all decoder state for a fresh decode run."""
        self.samplerate = None
        # In-flight requests, keyed by (device address, endpoint).
        self.request = {}
        self.request_id = 0
        # Transaction state machine: IDLE / TOKEN RECEIVED / DATA RECEIVED.
        self.transaction_state = 'IDLE'
        self.ss_transaction = None
        self.es_transaction = None
        self.transaction_ep = None
        self.transaction_addr = None
        self.wrote_pcap_header = False
    def putr(self, ss, es, data):
        """Emit an annotation spanning samples ss..es."""
        self.put(ss, es, self.out_ann, data)
    def putb(self, ts, data):
        """Emit binary (pcap) output at a single sample position."""
        self.put(ts, ts, self.out_binary, data)
    def pcap_global_header(self):
        """Return the 24-byte pcap file header (big-endian, usbmon link type)."""
        # See https://wiki.wireshark.org/Development/LibpcapFileFormat.
        h = b'\xa1\xb2\xc3\xd4' # Magic, indicate microsecond ts resolution
        h += b'\x00\x02' # Major version 2
        h += b'\x00\x04' # Minor version 4
        h += b'\x00\x00\x00\x00' # Correction vs. UTC, seconds
        h += b'\x00\x00\x00\x00' # Timestamp accuracy
        h += b'\xff\xff\xff\xff' # Max packet len
        # LINKTYPE_USB_LINUX_MMAPPED 220
        # Linux usbmon format, see Documentation/usb/usbmon.txt.
        h += b'\x00\x00\x00\xdc' # Link layer
        return h
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
if self.samplerate:
self.secs_per_sample = float(1) / float(self.samplerate)
    def start(self):
        """Register output channels and snapshot the relevant options."""
        self.out_binary = self.register(srd.OUTPUT_BINARY)
        self.out_ann = self.register(srd.OUTPUT_ANN)
        self.in_request_start = self.options['in_request_start']
    def handle_transfer(self):
        """Fold the just-finished transaction into its per-(addr, ep)
        request, creating the request if needed, and forward start/end
        events to handle_request().
        """
        request_started = 0
        # A transaction ends its request only on one of these handshakes.
        request_end = self.handshake in ('ACK', 'STALL', 'timeout')
        ep = self.transaction_ep
        addr = self.transaction_addr

        # Handle protocol STALLs, condition lasts until next SETUP transfer (8.5.3.4)
        if self.transaction_type == 'SETUP' and (addr, ep) in self.request:
            request = self.request[(addr,ep)]
            if request['type'] in ('SETUP IN', 'SETUP OUT'):
                # A new SETUP terminates any pending control request.
                request['es'] = self.ss_transaction
                self.handle_request(0, 1)

        if not (addr, ep) in self.request:
            # First transaction of a new request: allocate its record.
            self.request[(addr, ep)] = {'setup_data': [], 'data': [],
                'type': None, 'ss': self.ss_transaction, 'es': None,
                'ss_data': None, 'id': self.request_id, 'addr': addr, 'ep': ep}
            self.request_id += 1
            request_started = 1
        request = self.request[(addr,ep)]

        if request_end:
            request['es'] = self.es_transaction
            request['handshake'] = self.handshake

        # BULK or INTERRUPT transfer
        if request['type'] in (None, 'BULK IN') and self.transaction_type == 'IN':
            request['type'] = 'BULK IN'
            if len(request['data']) == 0 and len(self.transaction_data) > 0:
                # Remember where the first data byte appeared.
                request['ss_data'] = self.ss_transaction
            request['data'] += self.transaction_data
            self.handle_request(request_started, request_end)
        elif request['type'] in (None, 'BULK OUT') and self.transaction_type == 'OUT':
            request['type'] = 'BULK OUT'
            if self.handshake == 'ACK':
                request['data'] += self.transaction_data
            self.handle_request(request_started, request_end)

        # CONTROL, SETUP stage
        elif request['type'] is None and self.transaction_type == 'SETUP':
            request['setup_data'] = self.transaction_data
            # wLength (bytes 6..7 of the setup packet) is the expected
            # data-stage length, little-endian.
            request['wLength'] = struct.unpack('<H',
                bytes(self.transaction_data[6:8]))[0]
            # bit 7 of bmRequestType: 1 = device-to-host.
            if self.transaction_data[0] & 0x80:
                request['type'] = 'SETUP IN'
                self.handle_request(1, 0)
            else:
                request['type'] = 'SETUP OUT'
                self.handle_request(request['wLength'] == 0, 0)

        # CONTROL, DATA stage
        elif request['type'] == 'SETUP IN' and self.transaction_type == 'IN':
            request['data'] += self.transaction_data
        elif request['type'] == 'SETUP OUT' and self.transaction_type == 'OUT':
            if self.handshake == 'ACK':
                request['data'] += self.transaction_data
                if request['wLength'] == len(request['data']):
                    self.handle_request(1, 0)

        # CONTROL, STATUS stage
        elif request['type'] == 'SETUP IN' and self.transaction_type == 'OUT':
            self.handle_request(0, request_end)
        elif request['type'] == 'SETUP OUT' and self.transaction_type == 'IN':
            self.handle_request(0, request_end)
        else:
            return
        return
def ts_from_samplenum(self, sample):
ts = float(sample) * self.secs_per_sample
return (int(ts), int((ts % 1.0) * 1e6))
    def write_pcap_header(self):
        """Emit the pcap file header once, before the first record."""
        if not self.wrote_pcap_header:
            self.put(0, 0, self.out_binary, [0, self.pcap_global_header()])
            self.wrote_pcap_header = True
def request_summary(self, request):
s = '['
if request['type'] in ('SETUP IN', 'SETUP OUT'):
for b in request['setup_data']:
s += ' %02X' % b
s += ' ]['
for b in request['data']:
s += ' %02X' % b
s += ' ] : %s' % request['handshake']
return s
    def handle_request(self, request_start, request_end):
        """Emit annotation and pcap records for a request that just started
        and/or just completed, then retire it on completion.
        """
        if request_start != 1 and request_end != 1:
            return

        self.write_pcap_header()

        ep = self.transaction_ep
        addr = self.transaction_addr
        request = self.request[(addr, ep)]

        ss, es, ss_data = request['ss'], request['es'], request['ss_data']
        # With in_request_start == 'submit', IN annotations start at the
        # request's submit sample rather than at the first data byte.
        if self.in_request_start == 'submit':
            ss_data = ss

        if request_start == 1:
            # Issue PCAP 'SUBMIT' packet.
            ts = self.ts_from_samplenum(ss)
            pkt = pcap_usb_pkt(request, ts, True)
            self.putb(ss, [0, pkt.record_header()])
            self.putb(ss, [0, pkt.packet()])

        if request_end == 1:
            # Write annotation.
            summary = self.request_summary(request)
            if request['type'] == 'SETUP IN':
                self.putr(ss, es, [0, ['SETUP in: %s' % summary]])
            elif request['type'] == 'SETUP OUT':
                self.putr(ss, es, [1, ['SETUP out: %s' % summary]])
            elif request['type'] == 'BULK IN':
                self.putr(ss_data, es, [2, ['BULK in: %s' % summary]])
            elif request['type'] == 'BULK OUT':
                self.putr(ss, es, [3, ['BULK out: %s' % summary]])

            # Issue PCAP 'COMPLETE' packet.
            # NOTE(review): the COMPLETE record carries timestamp 'es' but
            # is placed at sample 'ss' -- presumably to keep the binary
            # stream ordered per request; confirm before changing.
            ts = self.ts_from_samplenum(es)
            pkt = pcap_usb_pkt(request, ts, False)
            self.putb(ss, [0, pkt.record_header()])
            self.putb(ss, [0, pkt.packet()])
            del self.request[(addr, ep)]
def decode(self, ss, es, data):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
ptype, pdata = data
# We only care about certain packet types for now.
if ptype not in ('PACKET'):
return
pcategory, pname, pinfo = pdata
if pcategory == 'TOKEN':
if pname == 'SOF':
return
if self.transaction_state == 'TOKEN RECEIVED':
transaction_timeout = self.es_transaction
# Token length is 35 bits, timeout is 16..18 bit times
# (USB 2.0 7.1.19.1).
transaction_timeout += int((self.es_transaction - self.ss_transaction) / 2)
if ss > transaction_timeout:
self.es_transaction = transaction_timeout
self.handshake = 'timeout'
self.handle_transfer()
self.transaction_state = 'IDLE'
if self.transaction_state != 'IDLE':
self.putr(ss, es, [4, ['ERR: received %s token in state %s' %
(pname, self.transaction_state)]])
return
sync, pid, addr, ep, crc5 = pinfo
self.transaction_data = []
self.ss_transaction = ss
self.es_transaction = es
self.transaction_state = 'TOKEN RECEIVED'
self.transaction_ep = ep
if ep > 0 and pname == 'IN':
self.transaction_ep = ep + 0x80
self.transaction_addr = addr
self.transaction_type = pname # IN OUT SETUP
elif pcategory == 'DATA':
if self.transaction_state != 'TOKEN RECEIVED':
self.putr(ss, es, [4, ['ERR: received %s token in state %s' %
(pname, self.transaction_state)]])
return
self.transaction_data = pinfo[2]
self.transaction_state = 'DATA RECEIVED'
elif pcategory == 'HANDSHAKE':
if self.transaction_state not in ('TOKEN RECEIVED', 'DATA RECEIVED'):
self.putr(ss, es, [4, ['ERR: received %s token in state %s' %
(pname, self.transaction_state)]])
return
self.handshake = pname
self.transaction_state = 'IDLE'
self.es_transaction = es
self.handle_transfer()
elif pname == 'PRE':
return
else:
self.putr(ss, es, [4, ['ERR: received unhandled %s token in state %s' %
(pname, self.transaction_state)]])
return
| gpl-3.0 |
chouseknecht/ansible | test/units/modules/network/f5/test_bigiq_regkey_pool.py | 21 | 3056 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigiq_regkey_pool import ModuleParameters
from library.modules.bigiq_regkey_pool import ApiParameters
from library.modules.bigiq_regkey_pool import ModuleManager
from library.modules.bigiq_regkey_pool import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigiq_regkey_pool import ModuleParameters
from ansible.modules.network.f5.bigiq_regkey_pool import ApiParameters
from ansible.modules.network.f5.bigiq_regkey_pool import ModuleManager
from ansible.modules.network.f5.bigiq_regkey_pool import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load (and memoize) fixture file *name* from the fixtures directory.

    JSON content is parsed and returned as Python objects; anything that is
    not valid JSON is returned as the raw string. Results are cached by path.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON -- keep the raw string. (Narrowed from a bare
        # ``except Exception`` so genuine errors are not hidden;
        # json.JSONDecodeError is a ValueError subclass.)
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Verify the parameter adapter classes expose 'description' from both
    module arguments and API responses."""
    def test_module_parameters(self):
        """ModuleParameters surfaces the module-argument description."""
        args = dict(
            description='this is a description'
        )

        p = ModuleParameters(params=args)
        assert p.description == 'this is a description'

    def test_api_parameters(self):
        """ApiParameters surfaces the description from a REST fixture."""
        args = load_fixture('load_regkey_license_pool.json')

        p = ApiParameters(params=args)
        assert p.description == 'this is a description'
class TestManager(unittest.TestCase):
    """Exercise ModuleManager.exec_module() with the device API mocked out."""
    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        """A pool that does not yet exist is created and reports a change."""
        set_module_args(dict(
            name='foo',
            description='bar baz',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
| gpl-3.0 |
cloudify-cosmo/cloudify-cli | cloudify_cli/commands/ldap.py | 1 | 3144 | from ..cli import cfy
from ..exceptions import CloudifyCliError
@cfy.group(name='ldap')
@cfy.options.common_options
@cfy.assert_manager_active()
def ldap():
    """Set LDAP authenticator.
    """
    # Group container only; the 'set' and 'status' subcommands do the work.
    # (The docstring doubles as CLI help text, so it is left unchanged.)
    pass
@ldap.command(name='set',
              short_help='Set the manager to use the LDAP authenticator.')
@cfy.options.ldap_server
@cfy.options.ldap_username
@cfy.options.ldap_password
@cfy.options.ldap_domain
@cfy.options.ldap_is_active_directory
@cfy.options.ldap_dn_extra
@cfy.options.ldap_ca_path
@cfy.options.ldap_base_dn
@cfy.options.ldap_group_dn
@cfy.options.ldap_bind_format
@cfy.options.ldap_user_filter
@cfy.options.ldap_group_member_filter
@cfy.options.ldap_attribute_email
@cfy.options.ldap_attribute_first_name
@cfy.options.ldap_attribute_last_name
@cfy.options.ldap_attribute_uid
@cfy.options.ldap_attribute_group_membership
@cfy.options.ldap_nested_levels
@cfy.pass_client()
@cfy.pass_logger
def set(ldap_server,
        ldap_username,
        ldap_password,
        ldap_domain,
        ldap_is_active_directory,
        ldap_dn_extra,
        ldap_base_dn,
        ldap_group_dn,
        ldap_bind_format,
        ldap_user_filter,
        ldap_group_member_filter,
        ldap_attribute_email,
        ldap_attribute_first_name,
        ldap_attribute_last_name,
        ldap_attribute_uid,
        ldap_attribute_group_membership,
        ldap_nested_levels,
        ldap_ca_path,
        client,
        logger):
    # Username and password must be supplied together or not at all;
    # an empty string for either counts as "not supplied".
    if bool(ldap_username) != bool(ldap_password):
        raise CloudifyCliError(
            'Must either set both username and password, or neither. '
            'Note that an empty username or password is invalid')

    logger.info('Setting the Cloudify manager authenticator to use LDAP..')
    client.ldap.set(
        ldap_server=ldap_server,
        ldap_username=ldap_username,
        ldap_password=ldap_password,
        ldap_is_active_directory=ldap_is_active_directory,
        ldap_domain=ldap_domain,
        ldap_dn_extra=ldap_dn_extra,
        ldap_base_dn=ldap_base_dn,
        ldap_group_dn=ldap_group_dn,
        ldap_bind_format=ldap_bind_format,
        ldap_user_filter=ldap_user_filter,
        ldap_group_member_filter=ldap_group_member_filter,
        ldap_attribute_email=ldap_attribute_email,
        ldap_attribute_first_name=ldap_attribute_first_name,
        ldap_attribute_last_name=ldap_attribute_last_name,
        ldap_attribute_uid=ldap_attribute_uid,
        ldap_attribute_group_membership=ldap_attribute_group_membership,
        ldap_nested_levels=ldap_nested_levels,
        ldap_ca_path=ldap_ca_path,
    )
    logger.info('LDAP authentication set successfully')
@ldap.command(name='status',
              short_help='Get the manager LDAP status (enabled/disabled).')
@cfy.pass_client()
@cfy.pass_logger
def status(client, logger):
    # Print whatever status string the manager reports.
    logger.info(client.ldap.get_status())
| apache-2.0 |
dscdac/Proyecto-IV-modulo2 | lib/python2.7/site-packages/setuptools/command/bdist_wininst.py | 325 | 2283 | from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
import os, sys
class bdist_wininst(_bdist_wininst):
    """distutils' bdist_wininst, patched so the produced installer is
    registered in dist_files with the correct Python version tag."""
    _good_upload = _bad_upload = None

    def create_exe(self, arcname, fullname, bitmap=None):
        # Delegate to distutils, then remember how the installer should
        # (and should not) appear in the distribution's dist_files.
        _bdist_wininst.create_exe(self, arcname, fullname, bitmap)
        installer_name = self.get_installer_filename(fullname)
        if self.target_version:
            pyversion = self.target_version
            # fix 2.5+ bdist_wininst ignoring --target-version spec
            self._bad_upload = ('bdist_wininst', 'any', installer_name)
        else:
            pyversion = 'any'
        self._good_upload = ('bdist_wininst', pyversion, installer_name)

    def _fix_upload_names(self):
        # Swap the mis-tagged 'any' entry for the correctly-tagged one.
        good, bad = self._good_upload, self._bad_upload
        dist_files = getattr(self.distribution, 'dist_files', [])
        if bad in dist_files:
            dist_files.remove(bad)
        if good not in dist_files:
            dist_files.append(good)

    def reinitialize_command (self, command, reinit_subcommands=0):
        cmd = self.distribution.reinitialize_command(
            command, reinit_subcommands)
        if command in ('install', 'install_lib'):
            cmd.install_lib = None # work around distutils bug
        return cmd

    def run(self):
        # _is_running lets other code detect that bdist_wininst is active.
        self._is_running = True
        try:
            _bdist_wininst.run(self)
            self._fix_upload_names()
        finally:
            self._is_running = False

    if not hasattr(_bdist_wininst, 'get_installer_filename'):
        # Older distutils releases lack this helper; provide a backport.
        def get_installer_filename(self, fullname):
            # Factored out to allow overriding in subclasses
            if self.target_version:
                # if we create an installer for a specific python version,
                # it's better to include this in the name
                installer_name = os.path.join(self.dist_dir,
                                              "%s.win32-py%s.exe" %
                                              (fullname, self.target_version))
            else:
                installer_name = os.path.join(self.dist_dir,
                                              "%s.win32.exe" % fullname)
            return installer_name
        # get_installer_filename()
| gpl-2.0 |
CristinaCristescu/root | interpreter/llvm/src/utils/lit/lit/util.py | 25 | 9291 | import errno
import itertools
import math
import os
import platform
import signal
import subprocess
import sys
import threading
def to_bytes(str):
    """Encode *str* to UTF-8 bytes. (The parameter shadows the builtin
    ``str``; kept as-is for API compatibility.)"""
    # Encode to UTF-8 to get binary data.
    return str.encode('utf-8')
def to_string(bytes):
    """Return *bytes* unchanged if it is already a str, otherwise UTF-8
    encode it. (The parameter shadows the builtin ``bytes``.)"""
    if isinstance(bytes, str):
        return bytes
    return to_bytes(bytes)
def convert_string(bytes):
    """Best-effort conversion of *bytes* to a native str: try UTF-8,
    fall back to str() on undecodable input."""
    try:
        return to_string(bytes.decode('utf-8'))
    except UnicodeError:
        return str(bytes)
def detectCPUs():
    """
    Detects the number of CPUs on a system. Cribbed from pp.

    Tries POSIX sysconf, then the OS X sysctl, then the Windows
    environment variable; falls back to 1.
    """
    # Linux, Unix and MacOS:
    if hasattr(os, "sysconf"):
        if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
            # Linux & Unix:
            ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
            if isinstance(ncpus, int) and ncpus > 0:
                return ncpus
        else: # OSX:
            return int(capture(['sysctl', '-n', 'hw.ncpu']))
    # Windows:
    if "NUMBER_OF_PROCESSORS" in os.environ:
        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
        if ncpus > 0:
            # With more than 32 processes, process creation often fails with
            # "Too many open files". FIXME: Check if there's a better fix.
            return min(ncpus, 32)
    return 1 # Default
def mkdir_p(path):
    """Create directory *path*, first creating any missing parent
    directories (like ``mkdir -p``); silently succeed if it, or any
    ancestor, already exists."""
    if not path or os.path.exists(path):
        return

    parent = os.path.dirname(path)
    if parent != path:
        # Recurse to create missing ancestors first.
        mkdir_p(parent)

    try:
        os.mkdir(path)
    except OSError as e:
        # A concurrent creator may have won the race; that is fine.
        if e.errno != errno.EEXIST:
            raise
def capture(args, env=None):
    """capture(command) - Run the given command (or argv list) in a shell and
    return the standard output."""
    # stderr is captured too (so it does not leak to the console) but
    # discarded; only stdout is returned, decoded to a native string.
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    out,_ = p.communicate()
    return convert_string(out)
def which(command, paths = None):
    """which(command, [paths]) - Look up the given command in the paths
    string (or the PATH environment variable, if unspecified); return the
    full path of the first match, or None."""
    if paths is None:
        paths = os.environ.get('PATH', '')

    # An existing absolute/relative file path wins outright.
    if os.path.isfile(command):
        return command

    if not paths:
        paths = os.defpath

    # On Windows (';' path separator) also try each PATHEXT suffix.
    # On Cygwin, 'PATHEXT' may exist but it should not be used.
    if os.pathsep == ';':
        suffixes = os.environ.get('PATHEXT', '').split(';')
    else:
        suffixes = ['']

    for directory in paths.split(os.pathsep):
        for suffix in suffixes:
            candidate = os.path.join(directory, command + suffix)
            if os.path.exists(candidate) and not os.path.isdir(candidate):
                return candidate
    return None
def checkToolsPath(dir, tools):
    """Return True if every tool in *tools* exists inside directory *dir*."""
    return all(os.path.exists(os.path.join(dir, tool)) for tool in tools)
def whichTools(tools, paths):
    """Return the first directory in the os.pathsep-separated *paths*
    string that contains every tool in *tools*, or None."""
    return next((candidate for candidate in paths.split(os.pathsep)
                 if checkToolsPath(candidate, tools)), None)
def printHistogram(items, title = 'Items'):
    """Print the 20 slowest (name, value) pairs followed by an ASCII
    histogram of all values; used for test-timing summaries.

    NOTE: sorts *items* in place by value.
    """
    items.sort(key = lambda item: item[1])
    maxValue = max([v for _,v in items])

    # Select first "nice" bar height that produces more than 10 bars.
    power = int(math.ceil(math.log(maxValue, 10)))
    for inc in itertools.cycle((5, 2, 2.5, 1)):
        barH = inc * 10**power
        N = int(math.ceil(maxValue / barH))
        if N > 10:
            break
        elif inc == 1:
            # Completed a 5/2/2.5/1 cycle without enough bars: go one
            # order of magnitude finer and keep cycling.
            power -= 1

    histo = [set() for i in range(N)]
    for name,v in items:
        bin = min(int(N * v/maxValue), N-1)
        histo[bin].add(name)

    barW = 40
    hr = '-' * (barW + 34)

    print('\nSlowest %s:' % title)
    print(hr)
    for name,value in items[-20:]:
        print('%.2fs: %s' % (value, name))

    print('\n%s Times:' % title)
    print(hr)

    # Field widths: pDigits/pfDigits size the range column, cDigits the
    # count column, so everything lines up.
    pDigits = int(math.ceil(math.log(maxValue, 10)))
    pfDigits = max(0, 3-pDigits)
    if pfDigits:
        pDigits += pfDigits + 1
    cDigits = int(math.ceil(math.log(len(items), 10)))
    print("[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
                                    'Percentage'.center(barW),
                                    'Count'.center(cDigits*2 + 1)))
    print(hr)
    for i,row in enumerate(histo):
        pct = float(len(row)) / len(items)
        w = int(barW * pct)
        print("[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]" % (
            pDigits, pfDigits, i*barH, pDigits, pfDigits, (i+1)*barH,
            '*'*w, ' '*(barW-w), cDigits, len(row), cDigits, len(items)))
class ExecuteCommandTimeoutException(Exception):
    """Raised by executeCommand() when the child exceeds its time limit;
    carries whatever output was collected before the process was killed."""
    def __init__(self, msg, out, err, exitCode):
        assert isinstance(msg, str)
        assert isinstance(out, str)
        assert isinstance(err, str)
        assert isinstance(exitCode, int)
        self.msg = msg
        self.out = out
        self.err = err
        self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
    """
    Execute command ``command`` (list of arguments or string)
    with
    * working directory ``cwd`` (str), use None to use the current
      working directory
    * environment ``env`` (dict), use None for none
    * Input to the command ``input`` (str), use string to pass
      no input.
    * Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.

    Returns a tuple (out, err, exitCode) where
    * ``out`` (str) is the standard output of running the command
    * ``err`` (str) is the standard error of running the command
    * ``exitCode`` (int) is the exitCode of running the command

    If the timeout is hit an ``ExecuteCommandTimeoutException``
    is raised.
    """
    p = subprocess.Popen(command, cwd=cwd,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=env, close_fds=kUseCloseFDs)
    timerObject = None
    # FIXME: Because of the way nested function scopes work in Python 2.x we
    # need to use a reference to a mutable object rather than a plain
    # bool. In Python 3 we could use the "nonlocal" keyword but we need
    # to support Python 2 as well.
    hitTimeOut = [False]
    try:
        if timeout > 0:
            def killProcess():
                # We may be invoking a shell so we need to kill the
                # process and all its children.
                hitTimeOut[0] = True
                killProcessAndChildren(p.pid)

            timerObject = threading.Timer(timeout, killProcess)
            timerObject.start()

        out,err = p.communicate(input=input)
        exitCode = p.wait()
    finally:
        # Cancel the watchdog even if communicate() raised.
        if timerObject != None:
            timerObject.cancel()

    # Ensure the resulting output is always of string type.
    out = convert_string(out)
    err = convert_string(err)

    if hitTimeOut[0]:
        raise ExecuteCommandTimeoutException(
            msg='Reached timeout of {} seconds'.format(timeout),
            out=out,
            err=err,
            exitCode=exitCode
            )

    # Detect Ctrl-C in subprocess.
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt

    return out, err, exitCode
def usePlatformSdkOnDarwin(config, lit_config):
    """On Darwin, support relocatable SDKs by asking ``xcrun`` for the
    default SDK path and exporting it as SDKROOT for the tests.

    No-op on non-Darwin targets or when ``xcrun`` is unavailable.
    """
    if 'darwin' in config.target_triple:
        try:
            cmd = subprocess.Popen(['xcrun', '--show-sdk-path'],
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = cmd.communicate()
            out = out.strip()
            res = cmd.wait()
        except OSError:
            # xcrun not installed / not executable.
            res = -1
        if res == 0 and out:
            # BUG FIX: communicate() returns bytes on Python 3; the
            # environment dictionary needs a native string, so decode
            # before exporting.
            sdk_path = convert_string(out)
            lit_config.note('using SDKROOT: %r' % sdk_path)
            config.environment['SDKROOT'] = sdk_path
def killProcessAndChildren(pid):
    """
    This function kills a process with ``pid`` and all its
    running children (recursively). It is currently implemented
    using the psutil module which provides a simple platform
    neutral implementation.

    TODO: Reimplement this without using psutil so we can
          remove our dependency on it.
    """
    import psutil
    try:
        psutilProc = psutil.Process(pid)
        # Handle the different psutil API versions
        try:
            # psutil >= 2.x
            children_iterator = psutilProc.children(recursive=True)
        except AttributeError:
            # psutil 1.x
            children_iterator = psutilProc.get_children(recursive=True)
        for child in children_iterator:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                # The child exited on its own; nothing to do.
                pass
        psutilProc.kill()
    except psutil.NoSuchProcess:
        # The process exited before we could kill it; nothing to do.
        pass
| lgpl-2.1 |
yoer/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/auth/tests/test_forms.py | 104 | 16206 | from __future__ import unicode_literals
import os
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core import mail
from django.forms.fields import Field, CharField
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserCreationFormTest(TestCase):
    """Validation and success-path tests for UserCreationForm."""
    fixtures = ['authtestdata.json']
    def test_user_already_exists(self):
        # 'testclient' is pre-loaded by the fixture, so it must be rejected
        # with the form's duplicate_username error.
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_text(form.error_messages['duplicate_username'])])
    def test_invalid_data(self):
        # '!' is rejected by the username field's validator.
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_text(form.fields['username'].error_messages['invalid'])])
    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])
    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [force_text(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)
        # Supplying only the confirmation still leaves password1 required.
        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])
    def test_success(self):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<User: jsmith@example.com>')
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AuthenticationFormTest(TestCase):
    """Tests for AuthenticationForm error reporting and label handling."""
    fixtures = ['authtestdata.json']
    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['invalid_login'] % {
                            'username': User._meta.get_field('username').verbose_name
                         })])
    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['inactive'])])
    def test_inactive_user_i18n(self):
        # The inactive-user error must also be raised under a translation.
        with self.settings(USE_I18N=True):
            with translation.override('pt-br', deactivate=True):
                # The user is inactive.
                data = {
                    'username': 'inactive',
                    'password': 'password',
                }
                form = AuthenticationForm(None, data)
                self.assertFalse(form.is_valid())
                self.assertEqual(form.non_field_errors(),
                                 [force_text(form.error_messages['inactive'])])
    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])
    def test_username_field_label(self):
        # An explicit label on a custom username field is preserved.
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="Name", max_length=75)
        form = CustomAuthenticationForm()
        self.assertEqual(form['username'].label, "Name")
    def test_username_field_label_not_set(self):
        # Without an explicit label, it is derived from the user model field.
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField()
        form = CustomAuthenticationForm()
        UserModel = get_user_model()
        username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
        self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
    def test_username_field_label_empty_string(self):
        # An empty-string label must be kept, not replaced with a default.
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label='')
        form = CustomAuthenticationForm()
        self.assertEqual(form.fields['username'].label, "")
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SetPasswordFormTest(TestCase):
    """Tests for SetPasswordForm (new password pair, no old password)."""
    fixtures = ['authtestdata.json']
    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])
    def test_success(self):
        # Matching new passwords validate.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordChangeFormTest(TestCase):
    """Tests for PasswordChangeForm (old password check + new password pair)."""
    fixtures = ['authtestdata.json']
    def test_incorrect_password(self):
        # A wrong old password is reported on the old_password field.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [force_text(form.error_messages['password_incorrect'])])
    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])
    def test_success(self):
        # The success case.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username='testclient')
        self.assertEqual(list(PasswordChangeForm(user, {}).fields),
                         ['old_password', 'new_password1', 'new_password2'])
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
    """Tests for UserChangeForm, including regression tests for
    #14242, #17944, #19133 and #19349."""
    fixtures = ['authtestdata.json']

    def test_username_validity(self):
        # A username containing a space is rejected by the field validator.
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['username'].errors,
                         [force_text(form.fields['username'].error_messages['invalid'])])

    def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'

            class Meta(UserChangeForm.Meta):
                fields = ('groups',)

        # Just check we can create it
        form = MyUserForm({})

    def test_unusable_password(self):
        # (Renamed from the typo'd "test_unsuable_password".)
        user = User.objects.get(username='empty_password')
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_empty_password(self):
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_unmanageable_password(self):
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_17944_unknown_password_algorithm(self):
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial
        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = 'new password'
        form = UserChangeForm(instance=user, data=post_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')

    def test_bug_19349_bound_password_field(self):
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())
@skipIfCustomUser
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
    TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    USE_TZ=False,
)
class PasswordResetFormTest(TestCase):
    """Tests for PasswordResetForm validation and email dispatch."""
    fixtures = ['authtestdata.json']

    def create_dummy_user(self):
        """
        Create a user and return a tuple (user_object, username, email).
        """
        username = 'jsmith'
        email = 'jsmith@example.com'
        user = User.objects.create_user(username, email, 'test123')
        return (user, username, email)

    def test_invalid_email(self):
        data = {'email': 'not valid'}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])

    def test_nonexistent_email(self):
        """
        Test nonexistent email address. This should not fail because it would
        expose information about registered users.
        """
        # (Renamed from the typo'd "test_nonexistant_email".)
        data = {'email': 'foo@bar.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(len(mail.outbox), 0)

    def test_cleaned_data(self):
        (user, username, email) = self.create_dummy_user()
        data = {'email': email}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save(domain_override='example.com')
        self.assertEqual(form.cleaned_data['email'], email)
        self.assertEqual(len(mail.outbox), 1)

    def test_custom_email_subject(self):
        data = {'email': 'testclient@example.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')

    def test_preserve_username_case(self):
        """
        Preserve the case of the user name (before the @ in the email address)
        when creating a user (#5605).
        """
        user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
        self.assertEqual(user.email, 'tesT@example.com')
        user = User.objects.create_user('forms_test3', 'tesT', 'test')
        self.assertEqual(user.email, 'tesT')

    def test_inactive_user(self):
        """
        Test that inactive user cannot receive password reset email.
        """
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({'email': email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_unusable_password(self):
        user = User.objects.create_user('testuser', 'test@example.com', 'test')
        data = {"email": "test@example.com"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        user.set_unusable_password()
        user.save()
        form = PasswordResetForm(data)
        # The form itself is valid, but no email is sent
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)
class ReadOnlyPasswordHashTest(TestCase):
    """Tests for the read-only password hash widget and field (refs #19349)."""
    def test_bug_19349_render_with_none_value(self):
        # Rendering the widget with value set to None
        # mustn't raise an exception.
        widget = ReadOnlyPasswordHashWidget()
        html = widget.render(name='password', value=None, attrs={})
        self.assertIn(_("No password set."), html)
    def test_readonly_field_has_changed(self):
        # The read-only field never reports a change, whatever the values.
        field = ReadOnlyPasswordHashField()
        self.assertFalse(field._has_changed('aaa', 'bbb'))
| apache-2.0 |
watspidererik/testenv | flask/lib/python2.7/site-packages/sqlparse/engine/__init__.py | 119 | 2286 | # Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""filter"""
from sqlparse import lexer
from sqlparse.engine import grouping
from sqlparse.engine.filter import StatementFilter
# XXX remove this when cleanup is complete
Filter = object
class FilterStack(object):
    """Pipeline that tokenizes SQL and pushes the token stream through
    three filter stages: ``preprocess`` (raw token stream), ``stmtprocess``
    (per split statement) and ``postprocess`` (per statement, with grouped
    tokens flattened first)."""
    def __init__(self):
        # Filter lists for the three stages; callers append filters to them.
        self.preprocess = []
        self.stmtprocess = []
        self.postprocess = []
        self.split_statements = False
        self._grouping = False
    def _flatten(self, stream):
        """Yield every leaf token, recursing into grouped tokens."""
        for token in stream:
            if token.is_group():
                for t in self._flatten(token.tokens):
                    yield t
            else:
                yield token
    def enable_grouping(self):
        # Request that run() build a grouped token tree for each statement.
        self._grouping = True
    def full_analyze(self):
        # Backwards-compatible alias for enable_grouping().
        self.enable_grouping()
    def run(self, sql, encoding=None):
        """Tokenize *sql* and apply all configured filters.
        Returns an iterable of statements; lazy except for the
        stmtprocess stage, which materializes a list."""
        stream = lexer.tokenize(sql, encoding)
        # Process token stream
        if self.preprocess:
            for filter_ in self.preprocess:
                stream = filter_.process(self, stream)
        if (self.stmtprocess or self.postprocess or self.split_statements
            or self._grouping):
            # Any per-statement work requires splitting the stream first.
            splitter = StatementFilter()
            stream = splitter.process(self, stream)
        if self._grouping:
            def _group(stream):
                for stmt in stream:
                    grouping.group(stmt)
                    yield stmt
            stream = _group(stream)
        if self.stmtprocess:
            def _run1(stream):
                ret = []
                for stmt in stream:
                    for filter_ in self.stmtprocess:
                        filter_.process(self, stmt)
                    ret.append(stmt)
                return ret
            stream = _run1(stream)
        if self.postprocess:
            def _run2(stream):
                for stmt in stream:
                    stmt.tokens = list(self._flatten(stmt.tokens))
                    for filter_ in self.postprocess:
                        stmt = filter_.process(self, stmt)
                    yield stmt
            stream = _run2(stream)
        return stream
| mit |
OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/django/contrib/staticfiles/storage.py | 103 | 14788 | from __future__ import unicode_literals
import hashlib
import json
import os
import posixpath
import re
from collections import OrderedDict
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, caches,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.six.moves.urllib.parse import (
unquote, urldefrag, urlsplit, urlunsplit,
)
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.
    The defaults for ``location`` and ``base_url`` are
    ``STATIC_ROOT`` and ``STATIC_URL``.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        # Fall back to the project-wide settings when not given explicitly.
        if location is None:
            location = settings.STATIC_ROOT
        if base_url is None:
            base_url = settings.STATIC_URL
        check_settings(base_url)
        super(StaticFilesStorage, self).__init__(location, base_url,
                                                 *args, **kwargs)
        # FileSystemStorage fallbacks to MEDIA_ROOT when location
        # is empty, so we restore the empty value.
        if not location:
            self.base_location = None
            self.location = None
    def path(self, name):
        # Refuse to compute filesystem paths when STATIC_ROOT is unset; a
        # clear error beats FileSystemStorage's silent MEDIA_ROOT fallback.
        if not self.location:
            raise ImproperlyConfigured("You're using the staticfiles app "
                                       "without having set the STATIC_ROOT "
                                       "setting to a filesystem path.")
        return super(StaticFilesStorage, self).path(name)
class HashedFilesMixin(object):
    """
    Storage mixin that maps file names to names embedding a hash of the
    file's content (``css/styles.css`` -> ``css/styles.55e7cbb9ba48.css``)
    and rewrites URL references inside CSS files accordingly.
    """
    default_template = """url("%s")"""
    # (file glob, (regex[, replacement template])) pairs used to find and
    # rewrite references to other static files inside matching files.
    patterns = (
        ("*.css", (
            r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
            (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
        )),
    )

    def __init__(self, *args, **kwargs):
        super(HashedFilesMixin, self).__init__(*args, **kwargs)
        self._patterns = OrderedDict()
        self.hashed_files = {}
        # Pre-compile the rewriting regexes, keyed by file glob.
        for extension, patterns in self.patterns:
            for pattern in patterns:
                if isinstance(pattern, (tuple, list)):
                    pattern, template = pattern
                else:
                    template = self.default_template
                compiled = re.compile(pattern, re.IGNORECASE)
                self._patterns.setdefault(extension, []).append((compiled, template))

    def file_hash(self, name, content=None):
        """
        Returns a hash of the file with the given name and optional content.
        """
        if content is None:
            return None
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        return md5.hexdigest()[:12]

    def hashed_name(self, name, content=None):
        """
        Return the hash-embedding name for ``name``, reading the file from
        storage when ``content`` is not supplied. Query strings and
        fragments in ``name`` are preserved.
        """
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        opened = False
        if content is None:
            if not self.exists(clean_name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (clean_name, self))
            try:
                content = self.open(clean_name)
            except IOError:
                # Handle directory paths and fragments
                return name
            opened = True
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            # Only close files we opened ourselves.
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        if file_hash is not None:
            file_hash = ".%s" % file_hash
        else:
            # Bug fix: file_hash() may return None (e.g. when overridden in
            # a subclass). Interpolating None directly would embed the
            # literal string "None" in the file name.
            file_hash = ""
        hashed_name = os.path.join(path, "%s%s%s" %
                                   (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ''
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith('/'):  # don't hash paths
                hashed_name = name
            else:
                hashed_name = self.stored_name(clean_name)
        final_url = super(HashedFilesMixin, self).url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name  # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += '?'
            final_url = urlunsplit(urlparts)
        return unquote(final_url)

    def url_converter(self, name, template=None):
        """
        Returns the custom URL converter for the given file name.
        """
        if template is None:
            template = self.default_template

        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs,
            # fragments and data-uri URLs
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return matched
            name_parts = name.split(os.sep)
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            file_name = hashed_url.split('/')[-1:]
            relative_url = '/'.join(url.split('/')[:-1] + file_name)
            # Return the hashed version to the file
            return template % unquote(relative_url)
        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given OrderedDict of files (called from collectstatic).
        Processing is actually two separate operations:
        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.
        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return
        # where to store the new paths
        hashed_files = OrderedDict()

        # build a list of adjustable files (named def instead of a lambda)
        def matches(path):
            return matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level (deepest first, so
        # referenced files are hashed before the files referencing them)
        def path_level(name):
            return len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):
            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:
                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)
                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)
                hashed_file_exists = self.exists(hashed_name)
                processed = False
                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(settings.FILE_CHARSET)
                    for patterns in self._patterns.values():
                        for pattern, template in patterns:
                            converter = self.url_converter(name, template)
                            try:
                                content = pattern.sub(converter, content)
                            except ValueError as exc:
                                yield name, None, exc
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(force_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(self.clean_name(saved_name))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(self.clean_name(saved_name))
                # and then set the cache accordingly
                hashed_files[self.hash_key(name)] = hashed_name
                yield name, hashed_name, processed
        # Finally store the processed paths
        self.hashed_files.update(hashed_files)

    def clean_name(self, name):
        # Normalize Windows path separators to URL-style slashes.
        return name.replace('\\', '/')

    def hash_key(self, name):
        # Subclasses may transform names into cache keys; identity here.
        return name

    def stored_name(self, name):
        hash_key = self.hash_key(name)
        cache_name = self.hashed_files.get(hash_key)
        if cache_name is None:
            cache_name = self.clean_name(self.hashed_name(name))
            # store the hashed name if there was a miss, e.g.
            # when the files are still processed
            self.hashed_files[hash_key] = cache_name
        return cache_name
class ManifestFilesMixin(HashedFilesMixin):
    """Persist the original-to-hashed name mapping in a JSON manifest file
    stored alongside the collected static files."""
    manifest_version = '1.0'  # the manifest format standard
    manifest_name = 'staticfiles.json'

    def __init__(self, *args, **kwargs):
        super(ManifestFilesMixin, self).__init__(*args, **kwargs)
        self.hashed_files = self.load_manifest()

    def read_manifest(self):
        """Return the raw manifest contents, or None when it is missing."""
        try:
            with self.open(self.manifest_name) as manifest:
                return manifest.read().decode('utf-8')
        except IOError:
            return None

    def load_manifest(self):
        """Parse the stored manifest into an OrderedDict of paths."""
        content = self.read_manifest()
        if content is None:
            return OrderedDict()
        try:
            stored = json.loads(content, object_pairs_hook=OrderedDict)
        except ValueError:
            pass
        else:
            if stored.get('version', None) == '1.0':
                return stored.get('paths', OrderedDict())
        # Unparseable JSON or an unsupported format version.
        raise ValueError("Couldn't load manifest '%s' (version %s)" %
                         (self.manifest_name, self.manifest_version))

    def post_process(self, *args, **kwargs):
        # Rebuild the mapping from scratch, then persist it once every
        # file has been processed by HashedFilesMixin.
        self.hashed_files = OrderedDict()
        for processed in super(ManifestFilesMixin, self).post_process(*args, **kwargs):
            yield processed
        self.save_manifest()

    def save_manifest(self):
        payload = {'paths': self.hashed_files, 'version': self.manifest_version}
        if self.exists(self.manifest_name):
            self.delete(self.manifest_name)
        serialized = json.dumps(payload).encode('utf-8')
        self._save(self.manifest_name, ContentFile(serialized))
class _MappingCache(object):
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key, None)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
    """Variant of HashedFilesMixin that keeps the original-to-hashed name
    mapping in a Django cache backend instead of an in-memory dict."""
    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        # Prefer a dedicated 'staticfiles' cache when one is configured.
        try:
            self.hashed_files = _MappingCache(caches['staticfiles'])
        except InvalidCacheBackendError:
            # Use the default backend
            self.hashed_files = _MappingCache(default_cache)
    def hash_key(self, name):
        # md5 the cleaned name so cache keys stay short and backend-safe.
        key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
        return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
    """
    A static file system storage backend which also saves
    hashed copies of the files it saves.
    The name mapping is kept in a Django cache backend.
    """
    pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
    """
    A static file system storage backend which also saves
    hashed copies of the files it saves.
    The name mapping is persisted in a ``staticfiles.json`` manifest file
    stored with the collected files.
    """
    pass
class ConfiguredStorage(LazyObject):
    # Lazily instantiate the storage class named by STATICFILES_STORAGE.
    def _setup(self):
        self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
| apache-2.0 |
slotlocker2/tscreen | tscreen.py | 1 | 3436 | #!/usr/bin/env python
from PyQt4 import QtGui, QtCore
import time, sys
class OverlayWidget(QtGui.QWidget):
    """Fullscreen clock widget: shows the time every second and paints the
    background with the color #HHMMSS derived from the time itself."""
    def __init__(self):
        QtGui.QWidget.__init__(self)
        # CONSTANTS
        self.FONT_SIZE = 35 # The font size of the time displayed
        # Timer which fires a signal every 1 second. Used to update the label.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update_time)
        self.timer.start(1000)
        # Qt Label which displays the time and the current color being painted.
        self.timeLabel = QtGui.QLabel(self)
        self.timeLabel.setTextFormat(QtCore.Qt.RichText)
        self.timeLabel.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
        self.timeLabel.setAutoFillBackground(True)
        # Layout to make sure the Label stays at the center of the screen in case of resize events.
        self.hLayout = QtGui.QGridLayout(self)
        self.hLayout.setContentsMargins(0,0,0,0) # Make sure the Label extends all the way up to the borders of the widget.
        self.setLayout(self.hLayout)
        self.hLayout.addWidget(self.timeLabel,0,0)
        # Palette to paint the background of the label
        self.palette = QtGui.QPalette()
        # Passing a hint to the window manager to keep the window above other windows. It is just a hint and does not ensure the window stays on top of other windows.
        self.setWindowFlags( self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint )
        # Setting the cursor to blank before going full screen.
        self.setCursor(QtCore.Qt.BlankCursor)
        # showFullScreen used instead of show() to start the app in full screen mode.
        self.showFullScreen()
    def determine_time_color(self):
        ''' Returns a list containing the time and the color as strings'''
        # "%H%M%S" doubles as a six-digit hex color code.
        return [time.strftime("%H:%M:%S") ,time.strftime("%H%M%S")]
    def update_time(self):
        ''' Updates the label with the current time and the color'''
        timecolorArray = self.determine_time_color()
        # <font> </font> does not work since it goes all the way up to only 7. Using CSS inside span to get the required size.
        self.labelText = "<b><span style='font-size:%dpt'>" % self.FONT_SIZE+ timecolorArray[0] + "</span></b>" + "<br>" + "#" + "<i>" + timecolorArray[1] + "</i>"
        self.timeLabel.setText(self.labelText)
        self.update_color(timecolorArray[1])
    def update_color(self,colorstring):
        # Converting hex to decimal
        Re = int(colorstring[0:2],16)
        Gr = int(colorstring[2:4],16)
        Bl = int(colorstring[4:6],16)
        role = QtGui.QPalette.Background
        self.palette.setColor(role, QtGui.QColor(Re, Gr, Bl))
        role = QtGui.QPalette.WindowText
        self.palette.setColor(role, QtGui.QColor(143, 143, 143)) # Gray goes with most of the colors and is not as much of an eyesore as white is.
        self.timeLabel.setPalette(self.palette)
    #Subclassing the keyPressEvent to close the widget once the Escape Key is pressed.
    def keyPressEvent(self,qKeyEvent):
        if qKeyEvent.key() == QtCore.Qt.Key_Escape:
            self.close()
        else:
            return QtGui.QWidget.keyPressEvent(self,qKeyEvent)
def main():
    # Create the Qt application and show the fullscreen clock widget;
    # exec_() blocks until the widget is closed (Escape key).
    app = QtGui.QApplication(sys.argv)
    appins = OverlayWidget()
    sys.exit(app.exec_())
if __name__ == "__main__":
    main()
| gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/sphinx/websupport/search/__init__.py | 3 | 4903 | # -*- coding: utf-8 -*-
"""
sphinx.websupport.search
~~~~~~~~~~~~~~~~~~~~~~~~
Server side search support for the web support package.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from six import text_type
class BaseSearch(object):
def __init__(self, path):
pass
def init_indexing(self, changed=[]):
"""Called by the builder to initialize the search indexer. `changed`
is a list of pagenames that will be reindexed. You may want to remove
these from the search index before indexing begins.
:param changed: a list of pagenames that will be re-indexed
"""
pass
def finish_indexing(self):
"""Called by the builder when writing has been completed. Use this
to perform any finalization or cleanup actions after indexing is
complete.
"""
pass
def feed(self, pagename, filename, title, doctree):
"""Called by the builder to add a doctree to the index. Converts the
`doctree` to text and passes it to :meth:`add_document`. You probably
won't want to override this unless you need access to the `doctree`.
Override :meth:`add_document` instead.
:param pagename: the name of the page to be indexed
:param filename: the name of the original source file
:param title: the title of the page to be indexed
:param doctree: is the docutils doctree representation of the page
"""
self.add_document(pagename, filename, title, doctree.astext())
def add_document(self, pagename, filename, title, text):
"""Called by :meth:`feed` to add a document to the search index.
This method should should do everything necessary to add a single
document to the search index.
`pagename` is name of the page being indexed. It is the combination
of the source files relative path and filename,
minus the extension. For example, if the source file is
"ext/builders.rst", the `pagename` would be "ext/builders". This
will need to be returned with search results when processing a
query.
:param pagename: the name of the page being indexed
:param filename: the name of the original source file
:param title: the page's title
:param text: the full text of the page
"""
raise NotImplementedError()
def query(self, q):
"""Called by the web support api to get search results. This method
compiles the regular expression to be used when :meth:`extracting
context <extract_context>`, then calls :meth:`handle_query`. You
won't want to override this unless you don't want to use the included
:meth:`extract_context` method. Override :meth:`handle_query` instead.
:param q: the search query string.
"""
self.context_re = re.compile('|'.join(q.split()), re.I)
return self.handle_query(q)
    def handle_query(self, q):
        """Called by :meth:`query` to retrieve search results for a search
        query `q`.

        This should return an iterable containing tuples of the following
        format::

            (<path>, <title>, <context>)

        `path` and `title` are the same values that were passed to
        :meth:`add_document`, and `context` should be a short text snippet
        of the text surrounding the search query in the document.

        The :meth:`extract_context` method is provided as a simple way
        to create the `context`.

        :param q: the search query
        """
        raise NotImplementedError()
def extract_context(self, text, length=240):
"""Extract the context for the search query from the document's
full `text`.
:param text: the full text of the document to create the context for
:param length: the length of the context snippet to return.
"""
res = self.context_re.search(text)
if res is None:
return ''
context_start = max(res.start() - int(length/2), 0)
context_end = context_start + length
context = ''.join([context_start > 0 and '...' or '',
text[context_start:context_end],
context_end < len(text) and '...' or ''])
try:
return text_type(context, errors='ignore')
except TypeError:
return context
def context_for_searchtool(self):
"""Required by the HTML builder."""
return {}
def get_js_stemmer_rawcode(self):
"""Required by the HTML builder."""
return None
# The built-in search adapters.
# Maps adapter name -> (module name, class name); presumably used to
# import and instantiate the selected backend lazily -- the consuming
# code is outside this view.
SEARCH_ADAPTERS = {
    'xapian': ('xapiansearch', 'XapianSearch'),
    'whoosh': ('whooshsearch', 'WhooshSearch'),
    'null': ('nullsearch', 'NullSearch'),
}
| gpl-3.0 |
jk1/intellij-community | python/lib/Lib/textwrap.py | 85 | 14867 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id: textwrap.py 46863 2006-06-11 19:42:51Z tim.peters $"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils).
try:
True, False
except NameError:
(True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
    """
    Object for wrapping/filling text.  The public interface consists of
    the wrap() and fill() methods; the other methods are just there for
    subclasses to override in order to tweak the default behaviour.
    If you want to completely replace the main wrapping algorithm,
    you'll probably have to override _wrap_chunks().

    Several instance attributes control various aspects of wrapping:
      width (default: 70)
        the maximum width of wrapped lines (unless break_long_words
        is false)
      initial_indent (default: "")
        string that will be prepended to the first line of wrapped
        output.  Counts towards the line's width.
      subsequent_indent (default: "")
        string that will be prepended to all lines save the first
        of wrapped output; also counts towards each line's width.
      expand_tabs (default: true)
        Expand tabs in input text to spaces before further processing.
        Each tab will become 1 .. 8 spaces, depending on its position in
        its line.  If false, each tab is treated as a single character.
      replace_whitespace (default: true)
        Replace all whitespace characters in the input text by spaces
        after tab expansion.  Note that if expand_tabs is false and
        replace_whitespace is true, every tab will be converted to a
        single space!
      fix_sentence_endings (default: false)
        Ensure that sentence-ending punctuation is always followed
        by two spaces.  Off by default because the algorithm is
        (unavoidably) imperfect.
      break_long_words (default: true)
        Break words longer than 'width'.  If false, those words will not
        be broken, and some lines might be longer than 'width'.
    """

    # Translation tables mapping each recognized whitespace character to
    # a plain space (byte-string table and unicode dict variants).
    whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))

    unicode_whitespace_trans = {}
    uspace = ord(u' ')
    for x in map(ord, _whitespace):
        unicode_whitespace_trans[x] = uspace

    # This funky little regex is just the trick for splitting
    # text up into word-wrappable chunks.  E.g.
    #   "Hello there -- you goof-ball, use the -b option!"
    # splits into
    #   Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
    # (after stripping out empty strings).
    wordsep_re = re.compile(
        r'(\s+|'                                 # any whitespace
        r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|'  # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')  # em-dash

    # XXX this is not locale- or charset-aware -- string.lowercase
    # is US-ASCII only (and therefore English-only)
    sentence_end_re = re.compile(r'[%s]'      # lowercase letter
                                 r'[\.\!\?]'  # sentence-ending punct.
                                 r'[\"\']?'   # optional end-of-quote
                                 % string.lowercase)

    def __init__(self,
                 width=70,
                 initial_indent="",
                 subsequent_indent="",
                 expand_tabs=True,
                 replace_whitespace=True,
                 fix_sentence_endings=False,
                 break_long_words=True):
        self.width = width
        self.initial_indent = initial_indent
        self.subsequent_indent = subsequent_indent
        self.expand_tabs = expand_tabs
        self.replace_whitespace = replace_whitespace
        self.fix_sentence_endings = fix_sentence_endings
        self.break_long_words = break_long_words

    # -- Private methods -----------------------------------------------
    # (possibly useful for subclasses to override)

    def _munge_whitespace(self, text):
        """_munge_whitespace(text : string) -> string

        Munge whitespace in text: expand tabs and convert all other
        whitespace characters to spaces.  Eg. " foo\\tbar\\n\\nbaz"
        becomes " foo    bar  baz".
        """
        if self.expand_tabs:
            text = text.expandtabs()
        if self.replace_whitespace:
            if isinstance(text, str):
                text = text.translate(self.whitespace_trans)
            elif isinstance(text, unicode):
                text = text.translate(self.unicode_whitespace_trans)
        return text

    def _split(self, text):
        """_split(text : string) -> [string]

        Split the text to wrap into indivisible chunks.  Chunks are
        not quite the same as words; see wrap_chunks() for full
        details.  As an example, the text
          Look, goof-ball -- use the -b option!
        breaks into the following chunks:
          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
        """
        chunks = self.wordsep_re.split(text)
        # Drop the empty strings the split pattern produces.
        chunks = filter(None, chunks)
        return chunks

    def _fix_sentence_endings(self, chunks):
        """_fix_sentence_endings(chunks : [string])

        Correct for sentence endings buried in 'chunks'.  Eg. when the
        original text contains "... foo.\\nBar ...", munge_whitespace()
        and split() will convert that to [..., "foo.", " ", "Bar", ...]
        which has one too few spaces; this method simply changes the one
        space to two.
        """
        i = 0
        pat = self.sentence_end_re
        while i < len(chunks)-1:
            if chunks[i+1] == " " and pat.search(chunks[i]):
                # BUGFIX: must insert *two* spaces here (the original
                # assigned a single space, which made this whole method a
                # no-op given the guard above); matches CPython textwrap.
                chunks[i+1] = "  "
                i += 2
            else:
                i += 1

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)

        Handle a chunk of text (most likely a word, not whitespace) that
        is too long to fit in any line.
        """
        space_left = max(width - cur_len, 1)

        # If we're allowed to break long words, then do so: put as much
        # of the next chunk onto the current line as will fit.
        if self.break_long_words:
            cur_line.append(reversed_chunks[-1][:space_left])
            reversed_chunks[-1] = reversed_chunks[-1][space_left:]

        # Otherwise, we have to preserve the long word intact.  Only add
        # it to the current line if there's nothing already there --
        # that minimizes how much we violate the width constraint.
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())

        # If we're not allowed to break long words, and there's already
        # text on the current line, do nothing.  Next time through the
        # main loop of _wrap_chunks(), we'll wind up here again, but
        # cur_len will be zero, so the next line will be entirely
        # devoted to the long word that we can't handle right now.

    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]

        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less.  (If 'break_long_words' is false,
        some lines may be longer than this.)  Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks.  Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)

        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chucks.
        chunks.reverse()

        while chunks:

            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0

            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent

            # Maximum width for this line.
            width = self.width - len(indent)

            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if chunks[-1].strip() == '' and lines:
                del chunks[-1]

            while chunks:
                l = len(chunks[-1])

                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l

                # Nope, this line is full.
                else:
                    break

            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and len(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)

            # If the last chunk on this line is all whitespace, drop it.
            if cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]

            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))

        return lines


    # -- Public interface ----------------------------------------------

    def wrap(self, text):
        """wrap(text : string) -> [string]

        Reformat the single paragraph in 'text' so it fits in lines of
        no more than 'self.width' columns, and return a list of wrapped
        lines.  Tabs in 'text' are expanded with string.expandtabs(),
        and all other whitespace characters (including newline) are
        converted to space.
        """
        text = self._munge_whitespace(text)
        chunks = self._split(text)
        if self.fix_sentence_endings:
            self._fix_sentence_endings(chunks)
        return self._wrap_chunks(chunks)

    def fill(self, text):
        """fill(text : string) -> string

        Reformat the single paragraph in 'text' to fit in lines of no
        more than 'self.width' columns, and return a new string
        containing the entire wrapped paragraph.
        """
        return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
    """Wrap a single paragraph of text, returning a list of wrapped lines.

    Reformat the single paragraph in 'text' so it fits in lines of no
    more than 'width' columns, and return a list of wrapped lines.  By
    default, tabs in 'text' are expanded with string.expandtabs(), and
    all other whitespace characters (including newline) are converted to
    space.  See the TextWrapper class for available keyword args to
    customize wrapping behaviour.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.wrap(text)
def fill(text, width=70, **kwargs):
    """Fill a single paragraph of text, returning a new string.

    Reformat the single paragraph in 'text' to fit in lines of no more
    than 'width' columns, and return a new string containing the entire
    wrapped paragraph.  As with wrap(), tabs are expanded and other
    whitespace characters converted to space.  See the TextWrapper class
    for available keyword args to customize wrapping behaviour.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.fill(text)
# -- Loosely related functionality -------------------------------------

# Matches lines consisting solely of blanks/tabs (normalized away by dedent()).
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
# Captures the leading blank/tab run of every line that has a non-blank char.
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    This can be used to make triple-quoted strings line up with the left
    edge of the display, while still presenting them in the source code
    in indented form.

    Note that tabs and spaces are both treated as whitespace, but they
    are not equal: the lines "  hello" and "\\thello" are
    considered to have no common leading whitespace.  (This behaviour is
    new in Python 2.5; older versions of this module incorrectly
    expanded tabs before searching for common leading whitespace.)
    """
    # Blank-out lines that contain only blanks/tabs so they cannot
    # influence the common-prefix computation, then collect the leading
    # whitespace of every remaining line.
    text = _whitespace_only_re.sub('', text)
    prefixes = _leading_whitespace_re.findall(text)

    # Find the longest leading run of spaces/tabs shared by all lines.
    common = None
    for prefix in prefixes:
        if common is None:
            common = prefix
        elif prefix.startswith(common):
            # Line indented at least as deeply as the current winner:
            # no change (previous winner is still on top).
            pass
        elif common.startswith(prefix):
            # Shallower but consistent indent: it's the new winner.
            common = prefix
        else:
            # No common whitespace at all: there is no margin.
            common = ""
            break

    # sanity check (testing/debugging only)
    if 0 and common:
        for line in text.split("\n"):
            assert not line or line.startswith(common), \
                   "line = %r, margin = %r" % (line, common)

    if common:
        text = re.sub(r'(?m)^' + common, '', text)
    return text
if __name__ == "__main__":
    #print dedent("\tfoo\n\tbar")
    #print dedent("  \thello there\n  \t  how are you?")
    # Quick manual smoke test (Python 2 print statement).
    print dedent("Hello there.\n  This is indented.")
| apache-2.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.3/django/contrib/gis/db/backends/spatialite/base.py | 55 | 4480 | from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (
_sqlite_extract, _sqlite_date_trunc, _sqlite_regexp, _sqlite_format_dtdelta,
connection_created, Database, DatabaseWrapper as SQLiteDatabaseWrapper,
SQLiteCursorWrapper)
from django.contrib.gis.db.backends.spatialite.client import SpatiaLiteClient
from django.contrib.gis.db.backends.spatialite.creation import SpatiaLiteCreation
from django.contrib.gis.db.backends.spatialite.introspection import SpatiaLiteIntrospection
from django.contrib.gis.db.backends.spatialite.operations import SpatiaLiteOperations
class DatabaseWrapper(SQLiteDatabaseWrapper):
    """SQLite database wrapper extended with SpatiaLite support.

    On top of the stock sqlite3 backend this locates the SpatiaLite C
    library at construction time and loads it as an SQLite extension
    whenever a new connection/cursor is created.
    """
    def __init__(self, *args, **kwargs):
        # Before we get too far, make sure pysqlite 2.5+ is installed.
        # (Extension loading requires it -- see _cursor below.)
        if Database.version_info < (2, 5, 0):
            raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
                                       'compatible with SpatiaLite and GeoDjango.')

        # Trying to find the location of the SpatiaLite library.
        # Here we are figuring out the path to the SpatiaLite library
        # (`libspatialite`). If it's not in the system library path (e.g., it
        # cannot be found by `ctypes.util.find_library`), then it may be set
        # manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
        self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
                                      find_library('spatialite'))
        if not self.spatialite_lib:
            raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
                                       'Make sure it is in your library path, or set '
                                       'SPATIALITE_LIBRARY_PATH in your settings.'
                                       )
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Swap in the spatial-aware helper objects for this backend.
        self.ops = SpatiaLiteOperations(self)
        self.client = SpatiaLiteClient(self)
        self.creation = SpatiaLiteCreation(self)
        self.introspection = SpatiaLiteIntrospection(self)

    def _cursor(self):
        # Lazily opens the connection on first use; subsequent calls fall
        # through to the plain cursor in the `else` branch at the bottom.
        if self.connection is None:
            ## The following is the same as in django.db.backends.sqlite3.base ##
            settings_dict = self.settings_dict
            if not settings_dict['NAME']:
                raise ImproperlyConfigured("Please fill out the database NAME in the settings module before using the database.")
            kwargs = {
                'database': settings_dict['NAME'],
                'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
            }
            kwargs.update(settings_dict['OPTIONS'])
            self.connection = Database.connect(**kwargs)
            # Register extract, date_trunc, and regexp functions.
            self.connection.create_function("django_extract", 2, _sqlite_extract)
            self.connection.create_function("django_date_trunc", 2, _sqlite_date_trunc)
            self.connection.create_function("regexp", 2, _sqlite_regexp)
            self.connection.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
            connection_created.send(sender=self.__class__, connection=self)

            ## From here on, customized for GeoDjango ##

            # Enabling extension loading on the SQLite connection.
            try:
                self.connection.enable_load_extension(True)
            except AttributeError:
                raise ImproperlyConfigured('The pysqlite library does not support C extension loading. '
                                           'Both SQLite and pysqlite must be configured to allow '
                                           'the loading of extensions to use SpatiaLite.'
                                           )

            # Loading the SpatiaLite library extension on the connection, and returning
            # the created cursor.
            cur = self.connection.cursor(factory=SQLiteCursorWrapper)
            try:
                cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
            except Exception, msg:
                raise ImproperlyConfigured('Unable to load the SpatiaLite library extension '
                                           '"%s" because: %s' % (self.spatialite_lib, msg))
            return cur
        else:
            return self.connection.cursor(factory=SQLiteCursorWrapper)
| apache-2.0 |
jsgf/xen | tools/xm-test/tests/xapi/04_xapi-data_uri_handling.py | 38 | 2566 | #!/usr/bin/python
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2009 flonatel GmbH & Co. KG
#============================================================================
#
# This file contains test cases for checking the data URI
# functionality:
# kernel and ramdisk are both checked with original uri,
# file uri and data uri (in every constallation)
#
import copy
from xen.util.fileuri import schemes, scheme_data, scheme_file
from XmTestLib import *
from XmTestLib.network_utils import *
from XmTestLib.XenAPIDomain import XmTestAPIDomain
# Build the three URI flavours (original path, data: URI, file: URI) for
# both the kernel and the ramdisk configured in the test defaults.
kernel_orig_uri = arch.configDefaults['kernel']
ramdisk_orig_uri = arch.configDefaults['ramdisk']
kernel_data_uri = scheme_data.create_from_file(kernel_orig_uri)
ramdisk_data_uri = scheme_data.create_from_file(ramdisk_orig_uri)
kernel_file_uri = scheme_file.encode(kernel_orig_uri)
ramdisk_file_uri = scheme_file.encode(ramdisk_orig_uri)
config = copy.copy(arch.configDefaults)
# Exercise every kernel/ramdisk URI combination (3 x 3 = 9 domains):
# boot a domain, run a trivial command in it, then tear it down.
for kernel in (kernel_orig_uri, kernel_data_uri, kernel_file_uri):
    for ramdisk in (ramdisk_orig_uri, ramdisk_data_uri, ramdisk_file_uri):
        config['kernel'] = kernel
        config['ramdisk'] = ramdisk
        # URIs are truncated to 100 chars for logging (data: URIs are huge).
        print("Using kernel='%s' ramdisk='%s'" % (kernel[:100], ramdisk[:100]))
        try:
            guest = XmTestAPIDomain(baseConfig = config)
            console = guest.start()
        except DomainError, e:
            if verbose:
                print("Failed to create test domain because: %s" % e.extra)
            FAIL(str(e))
        try:
            # Any successful command proves the domain booted with this
            # kernel/ramdisk combination.
            run = console.runCmd("ls /")
            if run['return'] > 0:
                FAIL("Could not start host")
        except ConsoleError, e:
            saveLog(console.getHistory())
            FAIL(str(e))
        guest.closeConsole()
        guest.stop()
| gpl-2.0 |
prasaianooz/pip | pip/_vendor/colorama/ansi.py | 442 | 2304 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
# Control Sequence Introducer (ESC '[') -- prefixes SGR/cursor sequences.
CSI = '\033['
# Operating System Command introducer (ESC ']') -- e.g. for window titles.
OSC = '\033]'
# Terminal bell; also terminates OSC sequences.
BEL = '\007'
def code_to_chars(code):
    """Return the SGR escape sequence ("CSI <code> m") for the given code."""
    return ''.join((CSI, str(code), 'm'))
class AnsiCodes(object):
    """Expose an object's integer SGR codes as ready-made escape strings.

    For every public attribute of *codes* (e.g. ``RED = 31``) an attribute
    of the same name is set on this instance holding the corresponding
    escape sequence produced by ``code_to_chars``.
    """
    def __init__(self, codes):
        for name in dir(codes):
            if name.startswith('_'):
                continue
            setattr(self, name, code_to_chars(getattr(codes, name)))
class AnsiCursor(object):
    """Factory for ANSI cursor-movement escape sequences."""

    def UP(self, n=1):
        """Move the cursor up by *n* rows."""
        return '%s%sA' % (CSI, n)

    def DOWN(self, n=1):
        """Move the cursor down by *n* rows."""
        return '%s%sB' % (CSI, n)

    def FORWARD(self, n=1):
        """Move the cursor right by *n* columns."""
        return '%s%sC' % (CSI, n)

    def BACK(self, n=1):
        """Move the cursor left by *n* columns."""
        return '%s%sD' % (CSI, n)

    def POS(self, x=1, y=1):
        """Move the cursor to column *x*, row *y* (1-based)."""
        return '%s%s;%sH' % (CSI, y, x)
def set_title(title):
    """Return the OSC sequence that sets the terminal window title."""
    return ''.join((OSC, '2;', title, BEL))
def clear_screen(mode=2):
    """Return the CSI "erase display" sequence (mode 2 = entire screen)."""
    return '%s%sJ' % (CSI, mode)
def clear_line(mode=2):
    """Return the CSI "erase line" sequence (mode 2 = entire line)."""
    return '%s%sK' % (CSI, mode)
class AnsiFore:
    """ANSI SGR parameter codes for foreground colors."""
    # Standard foreground colors (SGR 30-37); 39 restores the default.
    BLACK           = 30
    RED             = 31
    GREEN           = 32
    YELLOW          = 33
    BLUE            = 34
    MAGENTA         = 35
    CYAN            = 36
    WHITE           = 37
    RESET           = 39

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX   = 90
    LIGHTRED_EX     = 91
    LIGHTGREEN_EX   = 92
    LIGHTYELLOW_EX  = 93
    LIGHTBLUE_EX    = 94
    LIGHTMAGENTA_EX = 95
    LIGHTCYAN_EX    = 96
    LIGHTWHITE_EX   = 97
class AnsiBack:
    """ANSI SGR parameter codes for background colors."""
    # Standard background colors (SGR 40-47); 49 restores the default.
    BLACK           = 40
    RED             = 41
    GREEN           = 42
    YELLOW          = 43
    BLUE            = 44
    MAGENTA         = 45
    CYAN            = 46
    WHITE           = 47
    RESET           = 49

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX   = 100
    LIGHTRED_EX     = 101
    LIGHTGREEN_EX   = 102
    LIGHTYELLOW_EX  = 103
    LIGHTBLUE_EX    = 104
    LIGHTMAGENTA_EX = 105
    LIGHTCYAN_EX    = 106
    LIGHTWHITE_EX   = 107
class AnsiStyle:
    """ANSI SGR parameter codes for text intensity/style."""
    BRIGHT    = 1
    DIM       = 2
    NORMAL    = 22   # neither bright nor dim
    RESET_ALL = 0    # reset colors and style
# Module-level singletons: the public API users import
# (`from colorama import Fore, Back, Style, Cursor`).
Fore   = AnsiCodes( AnsiFore )
Back   = AnsiCodes( AnsiBack )
Style  = AnsiCodes( AnsiStyle )
Cursor = AnsiCursor()
| mit |
navotsil/Open-Knesset | simple/management/commands/parse_government_bill_pdf/extra/display_selection.py | 15 | 3234 | """ Code to look at pdf's and check the text selection mechanism
of poppler. Left here for future reference (not used by any management
command).
"""
import os
import itertools
import gtk
import goocanvas
import gobject
import poppler
import read_gov_law_proposal as gov
import pdftotext_ext as ext
pdf=poppler.document_new_from_file('file://%s/538.pdf'%os.getcwd(),password=None)
def squares(width, height, n_wide, n_high):
    """Yield (x, y, w, h) cells of an n_wide x n_high grid covering the page.

    Cells are produced row by row, left to right; widths/heights are floats.
    """
    cell_w = float(width) / n_wide
    cell_h = float(height) / n_high
    for row in xrange(n_high):
        for col in xrange(n_wide):
            yield (cell_w * col, cell_h * row, cell_w, cell_h)
def enlarging_square_range(start, height, end_width, n):
    """Yield n+1 rectangles anchored at `start`, widths growing 0 .. end_width."""
    for step in xrange(n + 1):
        yield (start[0], start[1], end_width * step / n, height)
def find_middle_at_y(page, start, height, the_end):
    """Bisection search (up to 10 iterations) for the rectangle width at
    which the amount of selectable text stops changing.

    Returns (width, iterations); returns (-1, -1) when a probe yields a
    text length that matches neither bracket (an abnormal "stretch").
    NOTE(review): gov.get_text/gov.rect presumably return the text selected
    inside a rectangle -- their definitions are not visible here; confirm.
    """
    # Bracket the search between width 0 and width `the_end`.
    rects = [(start[0], start[1], w, height) for w in [0, the_end]]
    def getlen((x,y,w,h)):
        # Length of the text selectable inside the given rectangle.
        return len(gov.get_text(page, gov.rect(x, y, w, h)))
    vals = [getlen((x,y,w,h)) for x, y, w, h in rects]
    min_val, max_val = vals
    middle = rects[0]
    for i in xrange(10):
        # Converged: both brackets select the same amount of text.
        if vals[0] == vals[1]:
            break
        # Probe the midpoint width between the two brackets.
        middle = (start[0], start[1], (rects[0][2]+rects[1][2])/2, height)
        middle_len = getlen(middle)
        if middle_len == vals[1]:
            vals[1], rects[1] = middle_len, middle
        elif middle_len == vals[0]:
            vals[0], rects[0] = middle_len, middle
        else:
            # Midpoint matches neither bracket -- give up on this row.
            print "not a normal stretch at iteration %s" % i
            return (-1, -1)
    #import pdb; pdb.set_trace()
    return middle[2], i
def find_column_separation(page, height=100, the_end=1000):
    """Probe the page at several y positions to locate the column gap.

    Bug fix: the original called ``find_middle_at_y(page, (0, y))`` with
    only two arguments, but that function also requires ``height`` and
    ``the_end``, so every call raised TypeError.  The probe parameters are
    now exposed as backward-compatible keyword arguments.

    :param page: poppler page object passed through to find_middle_at_y
    :param height: height of each probe rectangle
    :param the_end: maximum probe width handed to the bisection search
    :return: list of (middle_x, iterations) tuples, one per probed y
    """
    return [find_middle_at_y(page, (0, y), height, the_end)
            for y in xrange(0, 1000, 100)]
def map_the_desert((width, height), square_to_text, square_iter, text_offset_iter=None):
window, canvas = make_widget()
if text_offset_iter is None:
text_offset_iter = repeat((0,0))
texts = []
for x,y,w,h in square_iter:
dx, dy = text_offset_iter.next()
txt = square_to_text(x,y,w,h)
texts.append(txt)
rect = goocanvas.Rect(x=x+dx,y=y+dy,width=w,height=h)
text_widget = goocanvas.Text(text=len(txt), x=x+w/2+dx,y=y+h/2+dy)
canvas.get_root_item().add_child(rect)
canvas.get_root_item().add_child(text_widget)
return texts
def cover1(page, N=10):
    """Cover the whole page with an N x N grid of probe squares.

    Bug fix: the original referenced undefined globals ``width``/``height``
    (NameError) and passed the wrong arguments to ``map_the_desert`` (which
    takes the page size, an extraction callback and a square iterator).
    The page size is now read from the page itself and the extraction
    callback is built the same way as in ``stretch()``'s non-ext branch.
    """
    width, height = page.get_size()
    square_to_text = lambda x, y, w, h, page=page: pypoppler_text_from_page(page, x, y, w, h)
    return map_the_desert((width, height), square_to_text,
                          squares(width, height, N, N))
def stretch(use_ext, page_description, start, height, end_width, N=10):
    """Visualize text extraction over N+1 rectangles of growing width.

    When `use_ext` is true, `page_description` is a (filename, page_num)
    pair and text is extracted via the external pdftotext wrapper;
    otherwise it is a poppler page object queried directly.
    Label offsets alternate up/down so adjacent labels do not overlap.
    NOTE(review): get_page() and pypoppler_text_from_page() are not defined
    in this module's visible code -- presumably provided elsewhere; confirm.
    """
    if use_ext:
        filename, page_num = page_description
        width, height = get_page(filename, page_num).get_size()
        # pdftotext page numbers are 1-based, hence page_num+1.
        square_to_text = lambda x, y, w, h, filename=filename, page_num=page_num: ext.pdftotext(filename=filename,first=page_num+1, last=page_num+1, x=x, y=y, w=w, h=h)
    else:
        page = page_description
        width, height = page.get_size()
        square_to_text = lambda x, y, w, h, page=page: pypoppler_text_from_page(page, x, y, w, h)
    return map_the_desert((width, height),
                square_to_text,
                enlarging_square_range(start, height, end_width, N),
                itertools.cycle([(0,-10),(0,10)])
                )
def make_widget():
    """Create and show a top-level GTK window containing a GooCanvas.

    :return: (window, canvas) tuple
    """
    window = gtk.Window()
    canvas = goocanvas.Canvas()
    window.add(canvas)
    window.show_all()
    return window, canvas
| bsd-3-clause |
petebachant/daqmx | examples/experiment_run.py | 1 | 3389 | # -*- coding: utf-8 -*-
"""
Created on Tue May 21 21:06:37 2013
@author: Pete
This program commands a National Instruments counter output device to send a
pulse after a target position (read from the ACS controller) is reached.
"""
import daqmx
import acsc
import time
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
def main():
    """Poll the ACS motion controller until the target position is reached,
    then fire a single counter pulse on the NI DAQ global channel.

    Returns (t, x): numpy arrays of sample times (s) and raw positions.
    NOTE(review): requires the physical DAQ device and controller (or the
    ACS simulator); not runnable without hardware.
    """
    # --- Configure the NI counter-output task -------------------------
    taskhandle = daqmx.TaskHandle()
    daqmx.CreateTask("", taskhandle)
    phys_chan = "cDAQ9188-16D66BBMod3/ctr2"
    globalchan = "VecPulse"
    #Timing parameters
    rate = 200 # Pulse rate in Hz
    initialdelay = 0
    # 50% duty cycle at `rate` (only used by the commented-out call below).
    lowtime = 1/rate/2
    hightime = 1/rate/2
#    daqmx.CreateCOPulseChanTime(taskhandle, phys_chan, "", daqmx.Val_Seconds,
#                                daqmx.Val_Low, initialdelay, lowtime, hightime,
#                                False)
    # Use the NI MAX global channel instead of the raw physical channel;
    # emit exactly one finite sample (a single pulse).
    daqmx.AddGlobalChansToTask(taskhandle, globalchan)
    daqmx.CfgImplicitTiming(taskhandle, daqmx.Val_FiniteSamps, 1)
    # Set up communication with motion controller
    simulator = False
    # Parameters for plotting
    plotting = False
    plot_dynamic = False
    if simulator == True:
        hcomm = acsc.OpenCommDirect()
    else:
        hcomm = acsc.OpenCommEthernetTCP("10.0.0.100", 701)
    axis = 5
    buffno = 7
    target = 0
    # Give up polling after this many seconds.
    timeout = 10
    # Initialize arrays for storing time and position
    t = np.array(0)
    x = np.array(0)
    if plotting == True and plot_dynamic == True:
        plt.ioff()
        fig = plt.figure()
        ax = fig.add_subplot(111)
#        plt.xlim(0, timeout)
#        plt.ylim(0, target)
        line, = ax.plot([], [])
        fig.show()
    if hcomm == acsc.INVALID:
        print "Cannot connect to controller. Error:", acsc.GetLastError()
    else:
#        acsc.Enable(hcomm, axis)
        rpos = acsc.GetRPosition(hcomm, axis)
        x = rpos
        t0 = time.time()
        # Start the motion: simulator gets a direct point-to-point move,
        # real hardware runs the pre-loaded controller buffer.
        if simulator == True:
            acsc.ToPoint(hcomm, 0, axis, target+50000)
        else:
            acsc.RunBuffer(hcomm, buffno, None, None)
        while True:
            rpos = acsc.GetRPosition(hcomm, axis)
            if rpos >= target: break
            # Accumulate the position trace while waiting.
            x = np.append(x, rpos)
            t = np.append(t, time.time() - t0)
            if plotting == True and plot_dynamic == True:
                line.set_xdata(t)
                line.set_ydata(x)
                ax.relim()
                ax.autoscale_view()
                fig.canvas.draw()
            print "Axis is", acsc.GetAxisState(hcomm, axis)+'...'
            if time.time() - t0 > timeout:
                print "Motion timeout"
                print "Final axis position:", rpos
                break
            time.sleep(0.001)
        # Target reached (or timed out): emit the pulse and clean up.
        print "Target reached. Sending trigger pulse to", globalchan + "..."
        daqmx.StartTask(taskhandle, fatalerror=False)
        daqmx.WaitUntilTaskDone(taskhandle, timeout=10, fatalerror=False)
        daqmx.ClearTask(taskhandle, fatalerror=False)
        acsc.CloseComm(hcomm)
        print "Triggered at", np.max(x)
        if plotting == True:
            if plot_dynamic == False:
                plt.plot(t, x)
                plt.show()
        return t, x
if __name__ == "__main__":
t, x = main() | gpl-2.0 |
etingof/pyasn1-modules | pyasn1_modules/rfc2634.py | 13 | 9425 | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
# Modified by Russ Housley to add a map for use with opentypes.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Enhanced Security Services for S/MIME
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc2634.txt
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedval
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5280
# Upper bound used by the ASN.1 size constraints below.
MAX = float('inf')

# Shorthand aliases for types defined in RFC 5652 (CMS) and RFC 5280
# (PKIX certificate profile) that this module builds upon.
ContentType = rfc5652.ContentType
IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
PolicyInformation = rfc5280.PolicyInformation
GeneralNames = rfc5280.GeneralNames
CertificateSerialNumber = rfc5280.CertificateSerialNumber
# Signing Certificate Attribute
# Warning: It is better to use SigningCertificateV2 from RFC 5035
id_aa_signingCertificate = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.12')


class Hash(univ.OctetString):
    pass  # SHA-1 hash of entire certificate; RFC 5035 supports other hash algorithms


# Certificate identified by issuer name plus serial number.
class IssuerSerial(univ.Sequence):
    pass

IssuerSerial.componentType = namedtype.NamedTypes(
    namedtype.NamedType('issuer', GeneralNames()),
    namedtype.NamedType('serialNumber', CertificateSerialNumber())
)


# Certificate identifier: hash plus optional issuer/serial.
class ESSCertID(univ.Sequence):
    pass

ESSCertID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('certHash', Hash()),
    namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
)


# Attribute value: list of certificate IDs plus optional policies.
class SigningCertificate(univ.Sequence):
    pass

SigningCertificate.componentType = namedtype.NamedTypes(
    namedtype.NamedType('certs', univ.SequenceOf(
        componentType=ESSCertID())),
    namedtype.OptionalNamedType('policies', univ.SequenceOf(
        componentType=PolicyInformation()))
)
# Mail List Expansion History Attribute
id_aa_mlExpandHistory = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.3')

# RFC 2634 upper bound on the number of MLData entries.
ub_ml_expansion_history = univ.Integer(64)


# Identifies the mail list agent by issuer/serial or key identifier.
class EntityIdentifier(univ.Choice):
    pass

EntityIdentifier.componentType = namedtype.NamedTypes(
    namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
    namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier())
)


# Receipt policy of the mail list: none / insteadOf / inAdditionTo
# (context tags [0], [1], [2] respectively).
class MLReceiptPolicy(univ.Choice):
    pass

MLReceiptPolicy.componentType = namedtype.NamedTypes(
    namedtype.NamedType('none', univ.Null().subtype(implicitTag=tag.Tag(
        tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('insteadOf', univ.SequenceOf(
        componentType=GeneralNames()).subtype(
        sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.NamedType('inAdditionTo', univ.SequenceOf(
        componentType=GeneralNames()).subtype(
        sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)


# One mail-list expansion event: who expanded, when, and its policy.
class MLData(univ.Sequence):
    pass

MLData.componentType = namedtype.NamedTypes(
    namedtype.NamedType('mailListIdentifier', EntityIdentifier()),
    namedtype.NamedType('expansionTime', useful.GeneralizedTime()),
    namedtype.OptionalNamedType('mlReceiptPolicy', MLReceiptPolicy())
)


# The attribute value: a bounded sequence of MLData entries.
class MLExpansionHistory(univ.SequenceOf):
    pass

MLExpansionHistory.componentType = MLData()
MLExpansionHistory.sizeSpec = constraint.ValueSizeConstraint(1, ub_ml_expansion_history)
# ESS Security Label Attribute
id_aa_securityLabel = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.2')

# RFC 2634 upper bounds used by the constraints below.
ub_privacy_mark_length = univ.Integer(128)
ub_security_categories = univ.Integer(64)
ub_integer_options = univ.Integer(256)


# Human-readable privacy marking (printable or UTF-8 string).
class ESSPrivacyMark(univ.Choice):
    pass

ESSPrivacyMark.componentType = namedtype.NamedTypes(
    namedtype.NamedType('pString', char.PrintableString().subtype(
        subtypeSpec=constraint.ValueSizeConstraint(1, ub_privacy_mark_length))),
    namedtype.NamedType('utf8String', char.UTF8String().subtype(
        subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)


# Classification level; values 0-5 are the basic hierarchy, higher
# values up to ub_integer_options may be policy-defined.
class SecurityClassification(univ.Integer):
    pass

SecurityClassification.subtypeSpec=constraint.ValueRangeConstraint(0, ub_integer_options)

SecurityClassification.namedValues = namedval.NamedValues(
    ('unmarked', 0),
    ('unclassified', 1),
    ('restricted', 2),
    ('confidential', 3),
    ('secret', 4),
    ('top-secret', 5)
)


# OID of the security policy governing the label.
class SecurityPolicyIdentifier(univ.ObjectIdentifier):
    pass


# Open-typed category: a type OID ([0]) plus an ANY value ([1]).
class SecurityCategory(univ.Sequence):
    pass

SecurityCategory.componentType = namedtype.NamedTypes(
    namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(
        tag.tagClassContext, tag.tagFormatSimple, 1)))
)


class SecurityCategories(univ.SetOf):
    pass

SecurityCategories.componentType = SecurityCategory()
SecurityCategories.sizeSpec = constraint.ValueSizeConstraint(1, ub_security_categories)


# The security label itself: policy OID plus optional classification,
# privacy mark and categories.
class ESSSecurityLabel(univ.Set):
    pass

ESSSecurityLabel.componentType = namedtype.NamedTypes(
    namedtype.NamedType('security-policy-identifier', SecurityPolicyIdentifier()),
    namedtype.OptionalNamedType('security-classification', SecurityClassification()),
    namedtype.OptionalNamedType('privacy-mark', ESSPrivacyMark()),
    namedtype.OptionalNamedType('security-categories', SecurityCategories())
)
# Equivalent Labels Attribute
id_aa_equivalentLabels = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.9')
# EquivalentLabels: SEQUENCE OF ESSSecurityLabel (alternative labels that
# carry the same meaning under different policies).
class EquivalentLabels(univ.SequenceOf):
    pass
EquivalentLabels.componentType = ESSSecurityLabel()
# Content Identifier Attribute
id_aa_contentIdentifier = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.7')
# ContentIdentifier: opaque OCTET STRING naming a piece of content.
class ContentIdentifier(univ.OctetString):
    pass
# Content Reference Attribute
id_aa_contentReference = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.10')
# ContentReference: links this message to another signed content by its
# type, identifier and originator signature value.
class ContentReference(univ.Sequence):
    pass
ContentReference.componentType = namedtype.NamedTypes(
    namedtype.NamedType('contentType', ContentType()),
    namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
    namedtype.NamedType('originatorSignatureValue', univ.OctetString())
)
# Message Signature Digest Attribute
id_aa_msgSigDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.5')
# MsgSigDigest: digest (OCTET STRING) of a message signature.
class MsgSigDigest(univ.OctetString):
    pass
# Content Hints Attribute
id_aa_contentHint = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.4')
# ContentHints: optional human-readable description plus the inner
# content type, to help a recipient decide how to process the message.
class ContentHints(univ.Sequence):
    pass
ContentHints.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('contentDescription', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
    namedtype.NamedType('contentType', ContentType())
)
# Receipt Request Attribute
# AllOrFirstTier: INTEGER choosing whether all recipients or only the
# first tier of recipients must return receipts.
class AllOrFirstTier(univ.Integer):
    pass
AllOrFirstTier.namedValues = namedval.NamedValues(
    ('allReceipts', 0),
    ('firstTierRecipients', 1)
)
# ReceiptsFrom: CHOICE between the AllOrFirstTier rule (context tag [0])
# and an explicit list of recipient names (context tag [1]).
class ReceiptsFrom(univ.Choice):
    pass
ReceiptsFrom.componentType = namedtype.NamedTypes(
    namedtype.NamedType('allOrFirstTier', AllOrFirstTier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('receiptList', univ.SequenceOf(
        componentType=GeneralNames()).subtype(implicitTag=tag.Tag(
            tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_aa_receiptRequest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.1')
ub_receiptsTo = univ.Integer(16)
# ReceiptRequest: asks for receipts on the identified content, saying who
# must respond ('receiptsFrom') and where receipts go ('receiptsTo',
# bounded by ub_receiptsTo).
class ReceiptRequest(univ.Sequence):
    pass
ReceiptRequest.componentType = namedtype.NamedTypes(
    namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
    namedtype.NamedType('receiptsFrom', ReceiptsFrom()),
    namedtype.NamedType('receiptsTo', univ.SequenceOf(componentType=GeneralNames()).subtype(sizeSpec=constraint.ValueSizeConstraint(1, ub_receiptsTo)))
)
# Receipt Content Type
# ESSVersion: syntax version number; only v1 is defined here.
class ESSVersion(univ.Integer):
    pass
ESSVersion.namedValues = namedval.NamedValues(
    ('v1', 1)
)
id_ct_receipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.1')
# Receipt: the signed-receipt content itself, echoing the content type,
# content identifier and originator signature value being acknowledged.
class Receipt(univ.Sequence):
    pass
Receipt.componentType = namedtype.NamedTypes(
    namedtype.NamedType('version', ESSVersion()),
    namedtype.NamedType('contentType', ContentType()),
    namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
    namedtype.NamedType('originatorSignatureValue', univ.OctetString())
)
# Map of Attribute Type to the Attribute structure is added to the
# ones that are in rfc5652.py
# Registering these OIDs lets generic CMS decoders resolve the inner
# attribute value to the concrete ASN.1 type defined in this module.
_cmsAttributesMapUpdate = {
    id_aa_signingCertificate: SigningCertificate(),
    id_aa_mlExpandHistory: MLExpansionHistory(),
    id_aa_securityLabel: ESSSecurityLabel(),
    id_aa_equivalentLabels: EquivalentLabels(),
    id_aa_contentIdentifier: ContentIdentifier(),
    id_aa_contentReference: ContentReference(),
    id_aa_msgSigDigest: MsgSigDigest(),
    id_aa_contentHint: ContentHints(),
    id_aa_receiptRequest: ReceiptRequest(),
}
rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
# Map of Content Type OIDs to Content Types is added to the
# ones that are in rfc5652.py
_cmsContentTypesMapUpdate = {
    id_ct_receipt: Receipt(),
}
rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
| bsd-2-clause |
raphael0202/spaCy | spacy/nl/word_sets.py | 1 | 1272 | # coding: utf8
from __future__ import unicode_literals
# Stop words are retrieved from http://www.damienvanholten.com/downloads/dutch-stop-words.txt
# Lowercase Dutch stop-word tokens; the triple-quoted string is split on
# whitespace and deduplicated into a set for O(1) membership tests.
STOP_WORDS = set("""
aan af al alles als altijd andere
ben bij
daar dan dat de der deze die dit doch doen door dus
een eens en er
ge geen geweest
haar had heb hebben heeft hem het hier hij hoe hun
iemand iets ik in is
ja je
kan kon kunnen
maar me meer men met mij mijn moet
na naar niet niets nog nu
of om omdat ons ook op over
reeds
te tegen toch toen tot
u uit uw
van veel voor
want waren was wat we wel werd wezen wie wij wil worden
zal ze zei zelf zich zij zijn zo zonder zou
""".split())
# Number words
# Dutch cardinal number words ('een' and accented 'één' both included).
NUM_WORDS = set("""
nul een één twee drie vier vijf zes zeven acht negen tien elf twaalf dertien
veertien twintig dertig veertig vijftig zestig zeventig tachtig negentig honderd
duizend miljoen miljard biljoen biljard triljoen triljard
""".split())
# Ordinal words
# Dutch ordinal number words (eerste = first, tweede = second, ...).
ORDINAL_WORDS = set("""
eerste tweede derde vierde vijfde zesde zevende achtste negende tiende elfde
twaalfde dertiende veertiende twintigste dertigste veertigste vijftigste
zestigste zeventigste tachtigste negentigste honderdste duizendste miljoenste
miljardste biljoenste biljardste triljoenste triljardste
""".split())
| mit |
aosagie/spark | dev/create-release/generate-contributors.py | 35 | 11338 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script automates the process of creating release notes.
import os
import re
import sys
from releaseutils import *
# You must set the following before use!
JIRA_API_BASE = os.environ.get("JIRA_API_BASE", "https://issues.apache.org/jira")
RELEASE_TAG = os.environ.get("RELEASE_TAG", "v1.2.0-rc2")
PREVIOUS_RELEASE_TAG = os.environ.get("PREVIOUS_RELEASE_TAG", "v1.1.0")
# If the release tags are not provided, prompt the user to provide them
# NOTE: raw_input is Python 2 only -- this script assumes a Python 2 runtime.
while not tag_exists(RELEASE_TAG):
    RELEASE_TAG = raw_input("Please provide a valid release tag: ")
while not tag_exists(PREVIOUS_RELEASE_TAG):
    print("Please specify the previous release tag.")
    PREVIOUS_RELEASE_TAG = raw_input(
        "For instance, if you are releasing v1.2.0, you should specify v1.1.0: ")
# Gather commits found in the new tag but not in the old tag.
# This filters commits based on both the git hash and the PR number.
# If either is present in the old tag, then we ignore the commit.
print("Gathering new commits between tags %s and %s" % (PREVIOUS_RELEASE_TAG, RELEASE_TAG))
release_commits = get_commits(RELEASE_TAG)
previous_release_commits = get_commits(PREVIOUS_RELEASE_TAG)
# Index the old release by hash and by PR number for O(1) duplicate checks.
previous_release_hashes = set()
previous_release_prs = set()
for old_commit in previous_release_commits:
    previous_release_hashes.add(old_commit.get_hash())
    if old_commit.get_pr_number():
        previous_release_prs.add(old_commit.get_pr_number())
new_commits = []
for this_commit in release_commits:
    this_hash = this_commit.get_hash()
    this_pr_number = this_commit.get_pr_number()
    if this_hash in previous_release_hashes:
        continue
    if this_pr_number and this_pr_number in previous_release_prs:
        continue
    new_commits.append(this_commit)
if not new_commits:
    sys.exit("There are no new commits between %s and %s!" % (PREVIOUS_RELEASE_TAG, RELEASE_TAG))
# Prompt the user for confirmation that the commit range is correct
print("\n==================================================================================")
print("JIRA server: %s" % JIRA_API_BASE)
print("Release tag: %s" % RELEASE_TAG)
print("Previous release tag: %s" % PREVIOUS_RELEASE_TAG)
print("Number of commits in this range: %s" % len(new_commits))
print("")
def print_indented(_list):
    """Print every element of _list on its own line, indented by one space."""
    for element in _list:
        print(" %s" % element)
# Optionally dump the full commit list, then require explicit confirmation
# before continuing with this range.
if yesOrNoPrompt("Show all commits?"):
    print_indented(new_commits)
print("==================================================================================\n")
if not yesOrNoPrompt("Does this look correct?"):
    sys.exit("Ok, exiting")
# Filter out special commits
# Buckets for commits that are excluded from the contributor list
# (releases, maintenance, reverts, no-JIRA) vs. those kept (filtered_commits).
releases = []
maintenance = []
reverts = []
nojiras = []
filtered_commits = []
def is_release(commit_title):
    """Return True if the commit is release machinery rather than a real change."""
    lowered = commit_title.lower()
    release_markers = ("[release]",
                       "preparing spark release",
                       "preparing development version")
    if any(marker in lowered for marker in release_markers):
        return True
    return "CHANGES.txt" in commit_title
def is_maintenance(commit_title):
    """Return True if the commit is a maintenance or manual-close commit."""
    lowered = commit_title.lower()
    return "maintenance" in lowered or "manually close" in lowered
def has_no_jira(commit_title):
    """Return True if the commit title references no SPARK-XXXX JIRA issue."""
    return len(re.findall("SPARK-[0-9]+", commit_title.upper())) == 0
def is_revert(commit_title):
    """Return True if the commit reverts another commit."""
    lowered = commit_title.lower()
    return lowered.find("revert") >= 0
def is_docs(commit_title):
    """Return a truthy value if the commit looks documentation-related.

    Note: returns the (possibly empty) findall list when the regex decides,
    and a bool otherwise -- callers only rely on truthiness.
    """
    lowered = commit_title.lower()
    return re.findall("docs*", lowered) or "programming guide" in lowered
# Classify every new commit into exactly one bucket; only filtered_commits
# flow into the contributor list below.
for c in new_commits:
    t = c.get_title()
    if not t:
        continue
    elif is_release(t):
        releases.append(c)
    elif is_maintenance(t):
        maintenance.append(c)
    elif is_revert(t):
        reverts.append(c)
    elif is_docs(t):
        filtered_commits.append(c) # docs may not have JIRA numbers
    elif has_no_jira(t):
        nojiras.append(c)
    else:
        filtered_commits.append(c)
# Warn against ignored commits
if releases or maintenance or reverts or nojiras:
    print("\n==================================================================================")
    if releases:
        print("Found %d release commits" % len(releases))
    if maintenance:
        print("Found %d maintenance commits" % len(maintenance))
    if reverts:
        print("Found %d revert commits" % len(reverts))
    if nojiras:
        print("Found %d commits with no JIRA" % len(nojiras))
    print("* Warning: these commits will be ignored.\n")
    if yesOrNoPrompt("Show ignored commits?"):
        if releases:
            print("Release (%d)" % len(releases))
            print_indented(releases)
        if maintenance:
            print("Maintenance (%d)" % len(maintenance))
            print_indented(maintenance)
        if reverts:
            print("Revert (%d)" % len(reverts))
            print_indented(reverts)
        if nojiras:
            print("No JIRA (%d)" % len(nojiras))
            print_indented(nojiras)
    print("==================== Warning: the above commits will be ignored ==================\n")
prompt_msg = "%d commits left to process after filtering. Ok to proceed?" % len(filtered_commits)
if not yesOrNoPrompt(prompt_msg):
    sys.exit("Ok, exiting.")
# Keep track of warnings to tell the user at the end
warnings = []
# Mapping from the invalid author name to its associated JIRA issues
# E.g. andrewor14 -> set("SPARK-2413", "SPARK-3551", "SPARK-3471")
invalid_authors = {}
# Populate a map that groups issues and components by author
# It takes the form: Author name -> { Contribution type -> Spark components }
# For instance,
# {
#   'Andrew Or': {
#     'bug fixes': ['windows', 'core', 'web ui'],
#     'improvements': ['core']
#   },
#   'Tathagata Das' : {
#     'bug fixes': ['streaming']
#     'new feature': ['streaming']
#   }
# }
#
author_info = {}
# One JIRA client reused for all issue lookups in the loop below.
jira_options = {"server": JIRA_API_BASE}
jira_client = JIRA(options=jira_options)
print("\n=========================== Compiling contributor list ===========================")
# Main loop: for each surviving commit, resolve its author, its JIRA
# issue type(s) and the Spark components touched, accumulating into
# author_info (and invalid_authors for names needing translation).
for commit in filtered_commits:
    _hash = commit.get_hash()
    title = commit.get_title()
    issues = re.findall("SPARK-[0-9]+", title.upper())
    author = commit.get_author()
    date = get_date(_hash)
    # If the author name is invalid, keep track of it along
    # with all associated issues so we can translate it later
    if is_valid_author(author):
        author = capitalize_author(author)
    else:
        if author not in invalid_authors:
            invalid_authors[author] = set()
        for issue in issues:
            invalid_authors[author].add(issue)
    # Parse components from the commit title, if any
    commit_components = find_components(title, _hash)
    # Populate or merge an issue into author_info[author]
    # (closure over the current loop's `author`; redefined each iteration
    # and only called within the same iteration).
    def populate(issue_type, components):
        components = components or [CORE_COMPONENT] # assume core if no components provided
        if author not in author_info:
            author_info[author] = {}
        if issue_type not in author_info[author]:
            author_info[author][issue_type] = set()
        for component in components:
            author_info[author][issue_type].add(component)
    # Find issues and components associated with this commit
    for issue in issues:
        try:
            jira_issue = jira_client.issue(issue)
            jira_type = jira_issue.fields.issuetype.name
            jira_type = translate_issue_type(jira_type, issue, warnings)
            jira_components = [translate_component(c.name, _hash, warnings)
                               for c in jira_issue.fields.components]
            all_components = set(jira_components + commit_components)
            populate(jira_type, all_components)
        except Exception as e:
            # Network/JIRA failures are reported but never abort the run.
            print("Unexpected error:", e)
    # For docs without an associated JIRA, manually add it ourselves
    if is_docs(title) and not issues:
        populate("documentation", commit_components)
    print("  Processed commit %s authored by %s on %s" % (_hash, author, date))
print("==================================================================================\n")
# Write to contributors file ordered by author names
# Each line takes the format " * Author name -- semi-colon delimited contributions"
# e.g. * Andrew Or -- Bug fixes in Windows, Core, and Web UI; improvements in Core
# e.g. * Tathagata Das -- Bug fixes and new features in Streaming
contributors_file = open(contributors_file_name, "w")
# NOTE: sorting dict.keys() in place is Python 2 only (consistent with
# raw_input above); under Python 3 this would need sorted(author_info).
authors = author_info.keys()
authors.sort()
for author in authors:
    contribution = ""
    components = set()
    issue_types = set()
    for issue_type, comps in author_info[author].items():
        components.update(comps)
        issue_types.add(issue_type)
    # If there is only one component, mention it only once
    # e.g. Bug fixes, improvements in MLlib
    if len(components) == 1:
        contribution = "%s in %s" % (nice_join(issue_types), next(iter(components)))
    # Otherwise, group contributions by issue types instead of modules
    # e.g. Bug fixes in MLlib, Core, and Streaming; documentation in YARN
    else:
        contributions = ["%s in %s" % (issue_type, nice_join(comps))
                         for issue_type, comps in author_info[author].items()]
        contribution = "; ".join(contributions)
    # Do not use python's capitalize() on the whole string to preserve case
    assert contribution
    contribution = contribution[0].capitalize() + contribution[1:]
    # If the author name is invalid, use an intermediate format that
    # can be translated through translate-contributors.py later
    # E.g. andrewor14/SPARK-3425/SPARK-1157/SPARK-6672
    if author in invalid_authors and invalid_authors[author]:
        author = author + "/" + "/".join(invalid_authors[author])
    # line = " * %s -- %s" % (author, contribution)
    line = author
    contributors_file.write(line + "\n")
contributors_file.close()
print("Contributors list is successfully written to %s!" % contributors_file_name)
# Prompt the user to translate author names if necessary
if invalid_authors:
    warnings.append("Found the following invalid authors:")
    for a in invalid_authors:
        warnings.append("\t%s" % a)
    warnings.append("Please run './translate-contributors.py' to translate them.")
# Log any warnings encountered in the process
if warnings:
    print("\n============ Warnings encountered while creating the contributor list ============")
    for w in warnings:
        print(w)
    print("Please correct these in the final contributors list at %s." % contributors_file_name)
    print("==================================================================================\n")
| apache-2.0 |
googleinterns/learnbase | learnbase/src/main/webapp/WEB-INF/Lib/mimify.py | 304 | 15021 | #! /usr/bin/env python
"""Mimification and unmimification of mail messages.
Decode quoted-printable parts of a mail message or encode using
quoted-printable.
Usage:
mimify(input, output)
unmimify(input, output, decode_base64 = 0)
to encode and decode respectively. Input and output may be the name
of a file or an open file object. Only a readline() method is used
on the input file, only a write() method is used on the output file.
When using file names, the input and output file names may be the
same.
Interactive usage:
mimify.py -e [infile [outfile]]
mimify.py -d [infile [outfile]]
to encode and decode respectively. Infile defaults to standard
input and outfile to standard output.
"""
# Configure
MAXLEN = 200 # if lines longer than this, encode as quoted-printable
CHARSET = 'ISO-8859-1' # default charset for non-US-ASCII mail
QUOTE = '> ' # string replies are quoted with
# End configure
import re
import warnings
warnings.warn("the mimify module is deprecated; use the email package instead",
DeprecationWarning, 2)
__all__ = ["mimify","unmimify","mime_encode_header","mime_decode_header"]
# Precompiled header/body patterns used throughout this module:
# qp/base64_re  -- Content-Transfer-Encoding header values
# mp            -- multipart Content-Type header; captures the boundary
# chrset        -- Content-Type charset; captures prefix/charset/suffix
# he            -- header terminator (an empty line or a line of dashes)
# mime_code     -- a quoted-printable =XX escape; captures the hex digits
# mime_head     -- an =?iso-8859-1?q?...?= encoded word in a header
# repl          -- a "Subject: Re:" header marking a reply message
qp = re.compile('^content-transfer-encoding:\\s*quoted-printable', re.I)
base64_re = re.compile('^content-transfer-encoding:\\s*base64', re.I)
mp = re.compile('^content-type:.*multipart/.*boundary="?([^;"\n]*)', re.I|re.S)
chrset = re.compile('^(content-type:.*charset=")(us-ascii|iso-8859-[0-9]+)(".*)', re.I|re.S)
he = re.compile('^-*\n')
mime_code = re.compile('=([0-9a-f][0-9a-f])', re.I)
mime_head = re.compile('=\\?iso-8859-1\\?q\\?([^? \t\n]+)\\?=', re.I)
repl = re.compile('^subject:\\s+re: ', re.I)
class File:
    """Wrap a file object so that reading stops at a MIME part boundary.

    Only readline() is supported.  As soon as a boundary line is read
    (either ``boundary`` + newline or the closing ``boundary--`` form),
    it is stashed in ``self.peek`` and every later readline() returns
    the empty string, so callers see the part as terminated.
    """
    def __init__(self, file, boundary):
        self.file = file
        self.boundary = boundary
        self.peek = None

    def readline(self):
        # A stashed boundary means this logical part is already over.
        if self.peek is not None:
            return ''
        line = self.file.readline()
        if not line:
            return line
        if self.boundary and line in (self.boundary + '\n',
                                      self.boundary + '--\n'):
            self.peek = line
            return ''
        return line
class HeaderFile:
    # Read-ahead wrapper for the header section of a message: readline()
    # returns one *logical* header, with any continuation lines (lines
    # beginning with space or tab) already joined on.  The first
    # non-continuation line read ahead is kept in self.peek for the next
    # call.  Lines matching `he` (blank / dashes) end the header and are
    # returned as-is.
    def __init__(self, file):
        self.file = file
        self.peek = None

    def readline(self):
        # Start from the line read ahead last time, if any.
        if self.peek is not None:
            line = self.peek
            self.peek = None
        else:
            line = self.file.readline()
        if not line:
            return line
        if he.match(line):
            return line
        # Keep absorbing continuation lines until the next real header
        # (or EOF); that next line is stashed in self.peek.
        while 1:
            self.peek = self.file.readline()
            if len(self.peek) == 0 or \
               (self.peek[0] != ' ' and self.peek[0] != '\t'):
                return line
            line = line + self.peek
            self.peek = None
def mime_decode(line):
    """Decode a single line of quoted-printable text to 8bit."""
    # Replace every =XX escape with the character it encodes.  re.sub
    # with a callable inserts the replacement literally (no backslash
    # processing), so this is equivalent to the manual scan-and-splice.
    return mime_code.sub(lambda res: chr(int(res.group(1), 16)), line)
def mime_decode_header(line):
    """Decode a header line to 8bit.

    Expands every =?iso-8859-1?q?...?= encoded word found in the line.
    """
    def _decode_word(res):
        # Underscores encode spaces and must be expanded before the
        # =XX escapes are decoded.
        word = ' '.join(res.group(1).split('_'))
        return mime_decode(word)
    # Callable replacement keeps decoded bytes literal (no \-escapes).
    return mime_head.sub(_decode_word, line)
def unmimify_part(ifile, ofile, decode_base64 = 0):
    """Convert a quoted-printable part of a MIME mail message to 8bit.

    ifile is a File wrapper (so reading stops at this part's boundary);
    decoded output is written to ofile.  Quoted reply text (lines starting
    with QUOTE) keeps its quote prefix.  Multipart bodies recurse into
    this function for each sub-part.
    """
    multipart = None
    quoted_printable = 0
    is_base64 = 0
    is_repl = 0
    # If the part's boundary itself is quoted, the whole part is quoted
    # reply text; strip/re-add the quote prefix around each line.
    if ifile.boundary and ifile.boundary[:2] == QUOTE:
        prefix = QUOTE
    else:
        prefix = ''
    # read header
    hfile = HeaderFile(ifile)
    while 1:
        line = hfile.readline()
        if not line:
            return
        if prefix and line[:len(prefix)] == prefix:
            line = line[len(prefix):]
            pref = prefix
        else:
            pref = ''
        line = mime_decode_header(line)
        if qp.match(line):
            quoted_printable = 1
            continue # skip this header
        if decode_base64 and base64_re.match(line):
            is_base64 = 1
            continue
        ofile.write(pref + line)
        if not prefix and repl.match(line):
            # we're dealing with a reply message
            is_repl = 1
        mp_res = mp.match(line)
        if mp_res:
            multipart = '--' + mp_res.group(1)
        if he.match(line):
            break
    if is_repl and (quoted_printable or multipart):
        is_repl = 0
    # read body
    while 1:
        line = ifile.readline()
        if not line:
            return
        # Strip any stray encoded-word wrappers left in the body.
        line = re.sub(mime_head, '\\1', line)
        if prefix and line[:len(prefix)] == prefix:
            line = line[len(prefix):]
            pref = prefix
        else:
            pref = ''
##      if is_repl and len(line) >= 4 and line[:4] == QUOTE+'--' and line[-3:] != '--\n':
##          multipart = line[:-1]
        while multipart:
            if line == multipart + '--\n':
                # Closing boundary: this multipart is finished.
                ofile.write(pref + line)
                multipart = None
                line = None
                break
            if line == multipart + '\n':
                # Opening boundary: recurse into the next sub-part.
                ofile.write(pref + line)
                nifile = File(ifile, multipart)
                unmimify_part(nifile, ofile, decode_base64)
                line = nifile.peek
                if not line:
                    # premature end of file
                    break
                continue
            # not a boundary between parts
            break
        if line and quoted_printable:
            # Join soft-wrapped lines (trailing "=\n") before decoding.
            while line[-2:] == '=\n':
                line = line[:-2]
                newline = ifile.readline()
                if newline[:len(QUOTE)] == QUOTE:
                    newline = newline[len(QUOTE):]
                line = line + newline
            line = mime_decode(line)
        if line and is_base64 and not pref:
            import base64
            # NOTE: base64.decodestring is Python 2 API (removed in 3.9).
            line = base64.decodestring(line)
        if line:
            ofile.write(pref + line)
def unmimify(infile, outfile, decode_base64 = 0):
    """Convert quoted-printable parts of a MIME mail message to 8bit.

    infile and outfile may each be a file name or an open file object;
    only readline() is used on the input and write() on the output.
    When the same name is given for both, the input file is first
    renamed to a ','-prefixed backup so it can be rewritten in place.
    Set decode_base64 true to also decode base64-encoded parts.
    """
    # isinstance() instead of the old `type(x) == type('')` idiom: same
    # semantics, and also accepts str subclasses.
    if isinstance(infile, str):
        ifile = open(infile)
        if isinstance(outfile, str) and infile == outfile:
            import os
            d, f = os.path.split(infile)
            # Keep the original as ',name' so it remains readable while
            # the converted message is written under the original name.
            os.rename(infile, os.path.join(d, ',' + f))
    else:
        ifile = infile
    if isinstance(outfile, str):
        ofile = open(outfile, 'w')
    else:
        ofile = outfile
    nifile = File(ifile, None)
    unmimify_part(nifile, ofile, decode_base64)
    ofile.flush()
mime_char = re.compile('[=\177-\377]') # quote these chars in body
mime_header_char = re.compile('[=?\177-\377]') # quote these in header
def mime_encode(line, header):
    """Code a single line as quoted-printable.
    If header is set, quote some extra characters."""
    if header:
        reg = mime_header_char
    else:
        reg = mime_char
    newline = ''
    pos = 0
    if len(line) >= 5 and line[:5] == 'From ':
        # quote 'From ' at the start of a line for stupid mailers
        newline = ('=%02x' % ord('F')).upper()
        pos = 1
    # Replace every character matched by `reg` with its =XX escape.
    while 1:
        res = reg.search(line, pos)
        if res is None:
            break
        newline = newline + line[pos:res.start(0)] + \
                  ('=%02x' % ord(res.group(0))).upper()
        pos = res.end(0)
    line = newline + line[pos:]
    # Soft-wrap long lines with a trailing '=' so no physical line
    # reaches 75 characters, taking care never to split an =XX escape.
    newline = ''
    while len(line) >= 75:
        i = 73
        while line[i] == '=' or line[i-1] == '=':
            i = i - 1
        i = i + 1
        newline = newline + line[:i] + '=\n'
        line = line[i:]
    return newline + line
# Matches an isolated word containing 8-bit characters in a header
# (bounded by space/tab/parenthesis/newline); group 2 is the word itself.
mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)(?=[ \t)]|\n)')
def mime_encode_header(line):
    """Code a single header line as quoted-printable.

    Every word containing 8-bit characters is wrapped into an
    =?CHARSET?Q?...?= encoded word; the rest of the line is untouched.
    """
    newline = ''
    pos = 0
    while 1:
        res = mime_header.search(line, pos)
        if res is None:
            break
        newline = '%s%s%s=?%s?Q?%s?=' % \
                  (newline, line[pos:res.start(0)], res.group(1),
                   CHARSET, mime_encode(res.group(2), 1))
        pos = res.end(0)
    return newline + line[pos:]
# Header patterns used by mimify_part: Mime-Version, Content-Transfer-
# Encoding, and any single 8-bit (non-ASCII) character.
mv = re.compile('^mime-version:', re.I)
cte = re.compile('^content-transfer-encoding:', re.I)
iso_char = re.compile('[\177-\377]')
def mimify_part(ifile, ofile, is_mime):
    """Convert an 8bit part of a MIME mail message to quoted-printable.

    Works in three phases: scan the header (noting existing MIME
    headers, any multipart boundary, and whether the header itself needs
    quoting), buffer and pre-decode the body (to decide whether it needs
    quoting), then emit the rewritten header and body.  Multipart bodies
    recurse into this function for each sub-part.
    """
    has_cte = is_qp = is_base64 = 0
    multipart = None
    must_quote_body = must_quote_header = has_iso_chars = 0
    header = []
    header_end = ''
    message = []
    message_end = ''
    # read header
    hfile = HeaderFile(ifile)
    while 1:
        line = hfile.readline()
        if not line:
            break
        if not must_quote_header and iso_char.search(line):
            must_quote_header = 1
        if mv.match(line):
            is_mime = 1
        if cte.match(line):
            has_cte = 1
            if qp.match(line):
                is_qp = 1
            elif base64_re.match(line):
                is_base64 = 1
        mp_res = mp.match(line)
        if mp_res:
            multipart = '--' + mp_res.group(1)
        if he.match(line):
            header_end = line
            break
        header.append(line)
    # read body
    while 1:
        line = ifile.readline()
        if not line:
            break
        if multipart:
            # Stop buffering at the first (sub-)boundary; the multipart
            # loop at the end of this function takes over from there.
            if line == multipart + '--\n':
                message_end = line
                break
            if line == multipart + '\n':
                message_end = line
                break
        if is_base64:
            message.append(line)
            continue
        if is_qp:
            # Already quoted-printable: decode (joining soft-wrapped
            # lines) so it can be uniformly re-encoded below.
            while line[-2:] == '=\n':
                line = line[:-2]
                newline = ifile.readline()
                if newline[:len(QUOTE)] == QUOTE:
                    newline = newline[len(QUOTE):]
                line = line + newline
            line = mime_decode(line)
        message.append(line)
        if not has_iso_chars:
            if iso_char.search(line):
                has_iso_chars = must_quote_body = 1
        if not must_quote_body:
            if len(line) > MAXLEN:
                must_quote_body = 1
    # convert and output header and body
    for line in header:
        if must_quote_header:
            line = mime_encode_header(line)
        chrset_res = chrset.match(line)
        if chrset_res:
            if has_iso_chars:
                # change us-ascii into iso-8859-1
                if chrset_res.group(2).lower() == 'us-ascii':
                    line = '%s%s%s' % (chrset_res.group(1),
                                       CHARSET,
                                       chrset_res.group(3))
            else:
                # change iso-8859-* into us-ascii
                line = '%sus-ascii%s' % chrset_res.group(1, 3)
        if has_cte and cte.match(line):
            # Rewrite the existing CTE header to match the new encoding.
            line = 'Content-Transfer-Encoding: '
            if is_base64:
                line = line + 'base64\n'
            elif must_quote_body:
                line = line + 'quoted-printable\n'
            else:
                line = line + '7bit\n'
        ofile.write(line)
    if (must_quote_header or must_quote_body) and not is_mime:
        # The message needed quoting but had no MIME headers; add them.
        ofile.write('Mime-Version: 1.0\n')
        ofile.write('Content-Type: text/plain; ')
        if has_iso_chars:
            ofile.write('charset="%s"\n' % CHARSET)
        else:
            ofile.write('charset="us-ascii"\n')
    if must_quote_body and not has_cte:
        ofile.write('Content-Transfer-Encoding: quoted-printable\n')
    ofile.write(header_end)
    for line in message:
        if must_quote_body:
            line = mime_encode(line, 0)
        ofile.write(line)
    ofile.write(message_end)
    line = message_end
    while multipart:
        if line == multipart + '--\n':
            # read bit after the end of the last part
            while 1:
                line = ifile.readline()
                if not line:
                    return
                if must_quote_body:
                    line = mime_encode(line, 0)
                ofile.write(line)
        if line == multipart + '\n':
            # Recurse into the next sub-part, bounded by this boundary.
            nifile = File(ifile, multipart)
            mimify_part(nifile, ofile, 1)
            line = nifile.peek
            if not line:
                # premature end of file
                break
            ofile.write(line)
            continue
        # unexpectedly no multipart separator--copy rest of file
        while 1:
            line = ifile.readline()
            if not line:
                return
            if must_quote_body:
                line = mime_encode(line, 0)
            ofile.write(line)
def mimify(infile, outfile):
    """Convert 8bit parts of a MIME mail message to quoted-printable.

    infile and outfile may each be a file name or an open file object;
    only readline() is used on the input and write() on the output.
    When the same name is given for both, the input file is first
    renamed to a ','-prefixed backup so it can be rewritten in place.
    """
    # isinstance() instead of the old `type(x) == type('')` idiom: same
    # semantics, and also accepts str subclasses.
    if isinstance(infile, str):
        ifile = open(infile)
        if isinstance(outfile, str) and infile == outfile:
            import os
            d, f = os.path.split(infile)
            # Keep the original as ',name' so it remains readable while
            # the converted message is written under the original name.
            os.rename(infile, os.path.join(d, ',' + f))
    else:
        ifile = infile
    if isinstance(outfile, str):
        ofile = open(outfile, 'w')
    else:
        ofile = outfile
    nifile = File(ifile, None)
    mimify_part(nifile, ofile, 0)
    ofile.flush()
import sys
# Command-line entry point (Python 2 syntax: bare `print` statements).
# Runs when executed directly or imported under the name 'mimify'.
if __name__ == '__main__' or (len(sys.argv) > 0 and sys.argv[0] == 'mimify'):
    import getopt
    usage = 'Usage: mimify [-l len] -[ed] [infile [outfile]]'
    decode_base64 = 0
    opts, args = getopt.getopt(sys.argv[1:], 'l:edb')
    if len(args) not in (0, 1, 2):
        print usage
        sys.exit(1)
    # Exactly one of -e/-d must be given, and -b only makes sense with -d.
    if (('-e', '') in opts) == (('-d', '') in opts) or \
       ((('-b', '') in opts) and (('-d', '') not in opts)):
        print usage
        sys.exit(1)
    for o, a in opts:
        if o == '-e':
            encode = mimify
        elif o == '-d':
            encode = unmimify
        elif o == '-l':
            try:
                MAXLEN = int(a)
            except (ValueError, OverflowError):
                print usage
                sys.exit(1)
        elif o == '-b':
            decode_base64 = 1
    # Default to stdin/stdout when file arguments are omitted.
    if len(args) == 0:
        encode_args = (sys.stdin, sys.stdout)
    elif len(args) == 1:
        encode_args = (args[0], sys.stdout)
    else:
        encode_args = (args[0], args[1])
    if decode_base64:
        encode_args = encode_args + (decode_base64,)
    encode(*encode_args)
| apache-2.0 |
googleads/google-ads-python | google/ads/googleads/v7/common/types/frequency_cap.py | 1 | 3156 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.enums.types import frequency_cap_event_type
from google.ads.googleads.v7.enums.types import frequency_cap_level
from google.ads.googleads.v7.enums.types import frequency_cap_time_unit
__protobuf__ = proto.module(
package="google.ads.googleads.v7.common",
marshal="google.ads.googleads.v7",
manifest={"FrequencyCapEntry", "FrequencyCapKey",},
)
class FrequencyCapEntry(proto.Message):
    r"""A rule specifying the maximum number of times an ad (or some
    set of ads) can be shown to a user over a particular time
    period.
    Attributes:
        key (google.ads.googleads.v7.common.types.FrequencyCapKey):
            The key of a particular frequency cap. There
            can be no more than one frequency cap with the
            same key.
        cap (int):
            Maximum number of events allowed during the
            time range by this cap.
    """
    # Proto field numbers (1, 3) are part of the wire format generated
    # from the API definition -- do not renumber.
    key = proto.Field(proto.MESSAGE, number=1, message="FrequencyCapKey",)
    cap = proto.Field(proto.INT32, number=3, optional=True,)
class FrequencyCapKey(proto.Message):
    r"""A group of fields used as keys for a frequency cap.
    There can be no more than one frequency cap with the same key.
    Attributes:
        level (google.ads.googleads.v7.enums.types.FrequencyCapLevelEnum.FrequencyCapLevel):
            The level on which the cap is to be applied
            (e.g. ad group ad, ad group). The cap is applied
            to all the entities of this level.
        event_type (google.ads.googleads.v7.enums.types.FrequencyCapEventTypeEnum.FrequencyCapEventType):
            The type of event that the cap applies to
            (e.g. impression).
        time_unit (google.ads.googleads.v7.enums.types.FrequencyCapTimeUnitEnum.FrequencyCapTimeUnit):
            Unit of time the cap is defined at (e.g. day,
            week).
        time_length (int):
            Number of time units the cap lasts.
    """
    # Proto field numbers (1, 3, 2, 5) mirror the API definition and are
    # part of the wire format; the gap at 4 is presumably a retired tag.
    # Do not renumber.
    level = proto.Field(
        proto.ENUM,
        number=1,
        enum=frequency_cap_level.FrequencyCapLevelEnum.FrequencyCapLevel,
    )
    event_type = proto.Field(
        proto.ENUM,
        number=3,
        enum=frequency_cap_event_type.FrequencyCapEventTypeEnum.FrequencyCapEventType,
    )
    time_unit = proto.Field(
        proto.ENUM,
        number=2,
        enum=frequency_cap_time_unit.FrequencyCapTimeUnitEnum.FrequencyCapTimeUnit,
    )
    time_length = proto.Field(proto.INT32, number=5, optional=True,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
AlbertoPeon/invenio | modules/bibsched/lib/bibtaskex.py | 26 | 3653 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Bibliographic Task Example.
Demonstrates BibTask <-> BibSched connectivity, signal handling,
error handling, etc.
"""
__revision__ = "$Id$"
import sys
import time
from invenio.bibtask import task_init, write_message, task_set_option, \
task_get_option, task_update_progress, task_has_option, \
task_get_task_param, task_sleep_now_if_required
def fib(n):
    """Returns Fibonacci number for 'n' (fib(0) == fib(1) == 1).

    Iterative O(n) computation; the previous naive double recursion was
    exponential in n, which made large arguments pathologically slow.
    Produces exactly the same values as before.
    """
    out, nxt = 1, 1
    for _ in range(n):
        out, nxt = nxt, out + nxt
    return out
def task_submit_elaborate_specific_parameter(key, value, opts, args):
    """Interpret one command-line switch for this task.

    Record recognised switches in the task options via task_set_option()
    and return True; return False for an unknown key so the generic
    BibTask machinery can try to handle it itself.

    Recognised switches:
      -n / --number  -- how many Fibonacci numbers to print
      -e / --error   -- deliberately raise an error while running
    """
    if key in ('-n', '--number'):
        task_set_option('number', value)
    elif key in ('-e', '--error'):
        task_set_option('error', True)
    else:
        return False
    return True
def task_run_core():
    """Runs the task by fetching arguments from the BibSched task queue. This is
    what BibSched will be invoking via daemon call.
    The task prints Fibonacci numbers for up to NUM on the stdout, and some
    messages on stderr.
    Return 1 in case of success and 0 in case of failure."""
    n = int(task_get_option('number'))
    write_message("Printing %d Fibonacci numbers." % n, verbose=9)
    for i in range(0, n):
        # Periodically emit fake errors on stderr to demonstrate
        # write_message's stream/verbosity handling.
        if i > 0 and i % 4 == 0:
            write_message("Error: water in the CPU. Ignoring and continuing.", sys.stderr, verbose=3)
        elif i > 0 and i % 5 == 0:
            write_message("Error: floppy drive dropped on the floor. Ignoring and continuing.", sys.stderr)
        if task_get_option('error'):
            # Deliberate ZeroDivisionError: exercises BibSched's handling
            # of an unexpected exception when --error was requested.
            1 / 0
        write_message("fib(%d)=%d" % (i, fib(i)))
        task_update_progress("Done %d out of %d." % (i, n))
        # Honour pending sleep/stop requests from the BibSched queue.
        task_sleep_now_if_required(can_stop_too=True)
        time.sleep(1)
    task_update_progress("Done %d out of %d." % (n, n))
    return 1
def main():
    """Main that construct all the bibtask."""
    # Register this task with BibSched: authorization, CLI help, the
    # getopt-style specific parameters and the two callbacks defined above.
    task_init(authorization_action='runbibtaskex',
              authorization_msg="BibTaskEx Task Submission",
              help_specific_usage="""\
 -n, --number Print Fibonacci numbers for up to NUM. [default=30]
 -e, --error Raise an error from time to time
""",
              version=__revision__,
              specific_params=("n:e",
                               ["number=", "error"]),
              task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
              task_run_fnc=task_run_core)
### okay, here we go:
if __name__ == '__main__':
    main()
| gpl-2.0 |
aldariz/Sick-Beard | lib/tvdb_api/tests/test_tvdb_api.py | 18 | 17826 | #!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
"""Unittests for tvdb_api
"""
import os
import sys
import datetime
import unittest
# Force parent directory onto path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import tvdb_api
import tvdb_ui
from tvdb_api import (tvdb_shownotfound, tvdb_seasonnotfound,
tvdb_episodenotfound, tvdb_attributenotfound)
class test_tvdb_basic(unittest.TestCase):
    """Basic show/season/episode lookups through tvdb_api.

    NOTE(review): these tests appear to query the live thetvdb.com API
    (the module fetches http://thetvdb.com/api/... elsewhere in this
    file), so they require network access -- confirm before running in CI.
    """
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
    def test_different_case(self):
        """Checks the auto-correction of show names is working.
        It should correct the weirdly capitalised 'sCruBs' to 'Scrubs'
        """
        self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
        self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')
    def test_spaces(self):
        """Checks shownames with spaces
        """
        self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
        self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')
    def test_numeric(self):
        """Checks numeric show names
        """
        self.assertEquals(self.t['24'][2][20]['episodename'], 'Day 2: 3:00 A.M.-4:00 A.M.')
        self.assertEquals(self.t['24']['seriesname'], '24')
    def test_show_iter(self):
        """Iterating over a show returns each seasons
        """
        self.assertEquals(
            len(
                [season for season in self.t['Life on Mars']]
            ),
            2
        )
    def test_season_iter(self):
        """Iterating over a show returns episodes
        """
        self.assertEquals(
            len(
                [episode for episode in self.t['Life on Mars'][1]]
            ),
            8
        )
    def test_get_episode_overview(self):
        """Checks episode overview is retrieved correctly.
        """
        self.assertEquals(
            self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith(
                'When a new copy of Doral, a Cylon who had been previously'),
            True
        )
    def test_get_parent(self):
        """Check accessing series from episode instance
        """
        show = self.t['Battlestar Galactica (2003)']
        season = show[1]
        episode = show[1][1]
        self.assertEquals(
            season.show,
            show
        )
        self.assertEquals(
            episode.season,
            season
        )
        self.assertEquals(
            episode.season.show,
            show
        )
    def test_no_season(self):
        show = self.t['Katekyo Hitman Reborn']
        # NOTE(review): leftover debug prints (Python 2 print statements);
        # they only assert that indexing does not raise.
        print tvdb_api
        print show[1][1]
class test_tvdb_errors(unittest.TestCase):
    """Checks that the tvdb_* exceptions are raised for missing data."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
    def test_seasonnotfound(self):
        """Checks exception is thrown when season doesn't exist.
        """
        self.assertRaises(tvdb_seasonnotfound, lambda:self.t['CNNNN'][10][1])
    def test_shownotfound(self):
        """Checks exception is thrown when episode doesn't exist.
        """
        self.assertRaises(tvdb_shownotfound, lambda:self.t['the fake show thingy'])
    def test_episodenotfound(self):
        """Checks exception is raised for non-existent episode
        """
        self.assertRaises(tvdb_episodenotfound, lambda:self.t['Scrubs'][1][30])
    def test_attributenamenotfound(self):
        """Checks exception is thrown for if an attribute isn't found.
        """
        self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])
        self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])
class test_tvdb_search(unittest.TestCase):
    """Tests episode searching at show and season level, and airedOn."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
    def test_search_len(self):
        """There should be only one result matching
        """
        self.assertEquals(len(self.t['My Name Is Earl'].search('Faked His Own Death')), 1)
    def test_search_checkname(self):
        """Checks you can get the episode name of a search result
        """
        self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')
        self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')
    def test_search_multiresults(self):
        """Checks search can return multiple results
        """
        self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True)
    def test_search_no_params_error(self):
        """Checks not supplying search info raises TypeError"""
        self.assertRaises(
            TypeError,
            lambda: self.t['Scrubs'].search()
        )
    def test_search_season(self):
        """Checks the searching of a single season"""
        self.assertEquals(
            len(self.t['Scrubs'][1].search("First")),
            3
        )
    def test_search_show(self):
        """Checks the searching of an entire show"""
        self.assertEquals(
            len(self.t['CNNNN'].search('CNNNN', key='episodename')),
            3
        )
    def test_aired_on(self):
        """Tests airedOn show method"""
        sr = self.t['Scrubs'].airedOn(datetime.date(2001, 10, 2))
        self.assertEquals(len(sr), 1)
        self.assertEquals(sr[0]['episodename'], u'My First Day')
class test_tvdb_data(unittest.TestCase):
    """Checks retrieval of show-level data attributes."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
    def test_episode_data(self):
        """Check the firstaired value is retrieved
        """
        self.assertEquals(
            self.t['lost']['firstaired'],
            '2004-09-22'
        )
class test_tvdb_misc(unittest.TestCase):
    """Miscellaneous checks: repr() output and language list freshness."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
    def test_repr_show(self):
        """Check repr() of Season
        """
        self.assertEquals(
            repr(self.t['CNNNN']),
            "<Show Chaser Non-Stop News Network (CNNNN) (containing 3 seasons)>"
        )
    def test_repr_season(self):
        """Check repr() of Season
        """
        self.assertEquals(
            repr(self.t['CNNNN'][1]),
            "<Season instance (containing 9 episodes)>"
        )
    def test_repr_episode(self):
        """Check repr() of Episode
        """
        self.assertEquals(
            repr(self.t['CNNNN'][1][1]),
            "<Episode 01x01 - Terror Alert>"
        )
    def test_have_all_languages(self):
        """Check valid_languages is up-to-date (compared to languages.xml)
        """
        # Fetches the live languages.xml from thetvdb.com and compares it
        # with the hard-coded config list.
        et = self.t._getetsrc(
            "http://thetvdb.com/api/%s/languages.xml" % (
                self.t.config['apikey']
            )
        )
        languages = [x.find("abbreviation").text for x in et.findall("Language")]
        self.assertEquals(
            sorted(languages),
            sorted(self.t.config['valid_languages'])
        )
class test_tvdb_languages(unittest.TestCase):
    """Checks localized data retrieval via the language= parameter."""
    def test_episode_name_french(self):
        """Check episode data is in French (language="fr")
        """
        t = tvdb_api.Tvdb(cache = True, language = "fr")
        self.assertEquals(
            t['scrubs'][1][1]['episodename'],
            "Mon premier jour"
        )
        self.assertTrue(
            t['scrubs']['overview'].startswith(
                u"J.D. est un jeune m\xe9decin qui d\xe9bute"
            )
        )
    def test_episode_name_spanish(self):
        """Check episode data is in Spanish (language="es")
        """
        t = tvdb_api.Tvdb(cache = True, language = "es")
        self.assertEquals(
            t['scrubs'][1][1]['episodename'],
            "Mi Primer Dia"
        )
        self.assertTrue(
            t['scrubs']['overview'].startswith(
                u'Scrubs es una divertida comedia'
            )
        )
    def test_multilanguage_selection(self):
        """Check selected language is used
        """
        # Each custom UI deterministically picks the series entry for one
        # language, so the two Tvdb instances should return different data.
        class SelectEnglishUI(tvdb_ui.BaseUI):
            def selectSeries(self, allSeries):
                return [x for x in allSeries if x['language'] == "en"][0]
        class SelectItalianUI(tvdb_ui.BaseUI):
            def selectSeries(self, allSeries):
                return [x for x in allSeries if x['language'] == "it"][0]
        t_en = tvdb_api.Tvdb(
            cache=True,
            custom_ui = SelectEnglishUI,
            language = "en")
        t_it = tvdb_api.Tvdb(
            cache=True,
            custom_ui = SelectItalianUI,
            language = "it")
        self.assertEquals(
            t_en['dexter'][1][2]['episodename'], "Crocodile"
        )
        self.assertEquals(
            t_it['dexter'][1][2]['episodename'], "Lacrime di coccodrillo"
        )
class test_tvdb_unicode(unittest.TestCase):
    """Checks non-ASCII show names and cross-language search."""
    def test_search_in_chinese(self):
        """Check searching for show with language=zh returns Chinese seriesname
        """
        t = tvdb_api.Tvdb(cache = True, language = "zh")
        show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
        self.assertEquals(
            type(show),
            tvdb_api.Show
        )
        self.assertEquals(
            show['seriesname'],
            u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i'
        )
    def test_search_in_all_languages(self):
        """Check search_all_languages returns Chinese show, with language=en
        """
        t = tvdb_api.Tvdb(cache = True, search_all_languages = True, language="en")
        show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
        self.assertEquals(
            type(show),
            tvdb_api.Show
        )
        self.assertEquals(
            show['seriesname'],
            u'Virtues Of Harmony II'
        )
class test_tvdb_banners(unittest.TestCase):
    """Checks banner/artwork data when the Tvdb instance enables banners."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = True)
    def test_have_banners(self):
        """Check banners at least one banner is found
        """
        self.assertEquals(
            len(self.t['scrubs']['_banners']) > 0,
            True
        )
    def test_banner_url(self):
        """Checks banner URLs start with http://
        """
        # _banners is nested: banner type -> resolution -> banner id -> info.
        for banner_type, banner_data in self.t['scrubs']['_banners'].items():
            for res, res_data in banner_data.items():
                for bid, banner_info in res_data.items():
                    self.assertEquals(
                        banner_info['_bannerpath'].startswith("http://"),
                        True
                    )
    def test_episode_image(self):
        """Checks episode 'filename' image is fully qualified URL
        """
        self.assertEquals(
            self.t['scrubs'][1][1]['filename'].startswith("http://"),
            True
        )
    def test_show_artwork(self):
        """Checks various image URLs within season data are fully qualified
        """
        for key in ['banner', 'fanart', 'poster']:
            self.assertEquals(
                self.t['scrubs'][key].startswith("http://"),
                True
            )
class test_tvdb_actors(unittest.TestCase):
    """Checks actor data when the Tvdb instance enables actors."""
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)
    def test_actors_is_correct_datatype(self):
        """Check show/_actors key exists and is correct type"""
        self.assertTrue(
            isinstance(
                self.t['scrubs']['_actors'],
                tvdb_api.Actors
            )
        )
    def test_actors_has_actor(self):
        """Check show has at least one Actor
        """
        self.assertTrue(
            isinstance(
                self.t['scrubs']['_actors'][0],
                tvdb_api.Actor
            )
        )
    def test_actor_has_name(self):
        """Check first actor has a name"""
        self.assertEquals(
            self.t['scrubs']['_actors'][0]['name'],
            "Zach Braff"
        )
    def test_actor_image_corrected(self):
        """Check image URL is fully qualified
        """
        for actor in self.t['scrubs']['_actors']:
            if actor['image'] is not None:
                # Actor's image can be None, it displays as the placeholder
                # image on thetvdb.com
                self.assertTrue(
                    actor['image'].startswith("http://")
                )
class test_tvdb_doctest(unittest.TestCase):
    """Runs tvdb_api's embedded doctests as part of the suite."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
    def test_doctest(self):
        """Check docstring examples works"""
        import doctest
        doctest.testmod(tvdb_api)
class test_tvdb_custom_caching(unittest.TestCase):
    """Checks the various accepted values for the cache= parameter."""
    def test_true_false_string(self):
        """Tests setting cache to True/False/string
        Basic tests, only checking for errors
        """
        tvdb_api.Tvdb(cache = True)
        tvdb_api.Tvdb(cache = False)
        tvdb_api.Tvdb(cache = "/tmp")
    def test_invalid_cache_option(self):
        """Tests setting cache to invalid value
        """
        try:
            tvdb_api.Tvdb(cache = 2.3)
        except ValueError:
            pass
        else:
            self.fail("Expected ValueError from setting cache to float")
    def test_custom_urlopener(self):
        # A urllib2 opener whose default_open always raises, proving that
        # Tvdb routes its HTTP requests through the opener passed as cache=.
        class UsedCustomOpener(Exception):
            pass
        import urllib2
        class TestOpener(urllib2.BaseHandler):
            def default_open(self, request):
                print request.get_method()
                raise UsedCustomOpener("Something")
        custom_opener = urllib2.build_opener(TestOpener())
        t = tvdb_api.Tvdb(cache = custom_opener)
        try:
            t['scrubs']
        except UsedCustomOpener:
            pass
        else:
            self.fail("Did not use custom opener")
class test_tvdb_by_id(unittest.TestCase):
    """Checks looking a show up by its numeric thetvdb.com series id."""
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)
    def test_actors_is_correct_datatype(self):
        """Check show/_actors key exists and is correct type"""
        self.assertEquals(
            self.t[76156]['seriesname'],
            'Scrubs'
        )
class test_tvdb_zip(unittest.TestCase):
    """Repeats basic lookups with useZip=True (zipped API downloads)."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, useZip = True)
    def test_get_series_from_zip(self):
        """
        """
        self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
        self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')
    def test_spaces_from_zip(self):
        """Checks shownames with spaces
        """
        self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
        self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')
class test_tvdb_show_ordering(unittest.TestCase):
    """Checks dvdorder=True vs. default air-order episode numbering."""
    # Used to store the cached instance of Tvdb()
    t_dvd = None
    t_air = None
    def setUp(self):
        # Cache the Tvdb instances on the class, not the instance, so they
        # are created only once for the whole class -- this matches every
        # other test class in this module.  The original assigned to
        # `self.t_dvd` / `self.t_air`, which rebuilt the instances for
        # every test method and defeated the documented cache.
        if self.t_dvd is None:
            self.__class__.t_dvd = tvdb_api.Tvdb(cache = True, useZip = True, dvdorder=True)
        if self.t_air is None:
            self.__class__.t_air = tvdb_api.Tvdb(cache = True, useZip = True)
    def test_ordering(self):
        """Test Tvdb.search method
        """
        self.assertEquals(u'The Train Job', self.t_air['Firefly'][1][1]['episodename'])
        self.assertEquals(u'Serenity', self.t_dvd['Firefly'][1][1]['episodename'])
        self.assertEquals(u'The Cat & the Claw (Part 1)', self.t_air['Batman The Animated Series'][1][1]['episodename'])
        self.assertEquals(u'On Leather Wings', self.t_dvd['Batman The Animated Series'][1][1]['episodename'])
class test_tvdb_show_search(unittest.TestCase):
    """Checks the top-level Tvdb.search method."""
    # Used to store the cached instance of Tvdb()
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, useZip = True)
    def test_search(self):
        """Test Tvdb.search method
        """
        results = self.t.search("my name is earl")
        all_ids = [x['seriesid'] for x in results]
        self.assertTrue('75397' in all_ids)
class test_tvdb_alt_names(unittest.TestCase):
    """Checks series name aliases in search results."""
    t = None
    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)
    def test_1(self):
        """Tests basic access of series name alias
        """
        results = self.t.search("Don't Trust the B---- in Apartment 23")
        series = results[0]
        self.assertTrue(
            'Apartment 23' in series['aliasnames']
        )
# Run the whole suite with a verbose text runner when executed directly.
if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity = 2)
    unittest.main(testRunner = runner)
| gpl-3.0 |
GetSomeBlocks/Score_Soccer | resources/lib/IMDbPY/imdb/parser/local/personParser.py | 5 | 8552 | """
parser.local.personParser module (imdb package).
This module provides the functions used to parse the
information about people in a local installation of the
IMDb database.
Copyright 2004-2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.Movie import Movie
from imdb._exceptions import IMDbDataAccessError
from imdb.utils import re_titleRef, analyze_name, build_name, normalizeName, \
date_and_notes
from characterParser import getCharactersIDs
from utils import getRawData, getLabel, getFullIndex, latin2utf
def _parseList(l, prefix, mline=1):
"""Given a list of lines l, strips prefix and join consecutive lines
with the same prefix; if mline is True, there can be multiple info with
the same prefix, and the first line starts with 'prefix: * '."""
resl = []
reslapp = resl.append
ltmp = []
ltmpapp = ltmp.append
fistl = '%s: * ' % prefix
otherl = '%s: ' % prefix
if not mline:
fistl = fistl[:-2]
otherl = otherl[:-2]
firstlen = len(fistl)
otherlen = len(otherl)
parsing = 0
joiner = ' '.join
for line in l:
if line[:firstlen] == fistl:
parsing = 1
if ltmp:
reslapp(joiner(ltmp))
ltmp[:] = []
data = line[firstlen:].strip()
if data: ltmpapp(data)
elif mline and line[:otherlen] == otherl:
data = line[otherlen:].strip()
if data: ltmpapp(data)
else:
if ltmp:
reslapp(joiner(ltmp))
ltmp[:] = []
if parsing:
if ltmp: reslapp(joiner(ltmp))
break
return resl
def _buildGuests(gl):
    """Return a list of Movie objects from a list of GA lines.

    Each GA line is expected to contain exactly one _title_ (qv)
    reference, optionally followed by a ', as character' part and a
    trailing parenthesised note; lines without exactly one title
    reference are skipped.
    """
    rl = []
    rlapp = rl.append
    for g in gl:
        # When used by the imdbpy2sql.py script, latin_1 strings are passed.
        if not isinstance(g, unicode):
            g = unicode(g, 'latin_1', 'replace')
        titl = re_titleRef.findall(g)
        if len(titl) != 1: continue
        note = u''
        # Extract a trailing '(episode ...)' or generic '(...)' note.
        if g[-1] == ')':
            opi = g.rfind('(episode')
            if opi == -1: opi = g.rfind('(')
            if opi != -1:
                note = g[opi:].replace('_', '"').strip()
                g = g[:opi].strip()
        cr = u''
        # Extract the 'as <character>' part, if present.
        cri = g.find('_ (qv), as ')
        if cri != -1:
            cr = g[cri+11:].replace('[unknown]', u'').strip()
            if cr and cr[-1] == ')':
                # A parenthesised part of the role is moved into the notes.
                opi = cr.rfind('(')
                if opi != -1:
                    if note: note += ' '
                    note += cr[opi:]
                    cr = cr[:opi].strip()
        # As you can see, we've no notion of the movieID, here.
        m = Movie(title=titl[0], currentRole=cr, notes=note,
                    accessSystem='local')
        rlapp(m)
    return rl
def _parseBioBy(l):
"""Return a list of biographies."""
bios = []
biosappend = bios.append
tmpbio = []
tmpbioappend = tmpbio.append
joiner = ' '.join
for line in l:
if line[:4] == 'BG: ':
tmpbioappend(line[4:].strip())
elif line[:4] == 'BY: ':
if tmpbio:
biosappend(joiner(tmpbio) + '::' + line[4:].strip())
tmpbio[:] = []
# Cut mini biographies up to 2**16-1 chars, to prevent errors with
# some MySQL versions - when used by the imdbpy2sql.py script.
bios[:] = [bio[:65535] for bio in bios]
return bios
def _parseBiography(biol):
    """Parse the biographies.data file.

    Given the list of lines for one person's entry, return a dictionary
    mapping keys like 'mini biography', 'birth date', 'trivia',
    'quotes', ... to the parsed values.

    Bug fix: the original assigned
    ``bio = ' '.join(_parseList(biol, 'BG', mline=0))`` and immediately
    overwrote ``bio`` on the next line; that dead assignment (and the
    pointless _parseList pass over the data) has been removed.
    """
    res = {}
    bio = _parseBioBy(biol)
    if bio: res['mini biography'] = bio
    # Single-line tags are dispatched on their 4- or 6-char prefix.
    for x in biol:
        x4 = x[:4]
        x6 = x[:6]
        if x4 == 'DB: ':
            date, notes = date_and_notes(x[4:])
            if date:
                res['birth date'] = date
            if notes:
                res['birth notes'] = notes
        elif x4 == 'DD: ':
            date, notes = date_and_notes(x[4:])
            if date:
                res['death date'] = date
            if notes:
                res['death notes'] = notes
        elif x6 == 'SP: * ':
            res.setdefault('spouse', []).append(x[6:].strip())
        elif x4 == 'RN: ':
            n = x[4:].strip()
            if not n: continue
            # Normalize the birth name to canonical form.
            rn = build_name(analyze_name(n, canonical=1), canonical=1)
            res['birth name'] = rn
        elif x6 == 'AT: * ':
            res.setdefault('articles', []).append(x[6:].strip())
        elif x4 == 'HT: ':
            res['height'] = x[4:].strip()
        elif x6 == 'PT: * ':
            res.setdefault('pictorials', []).append(x[6:].strip())
        elif x6 == 'CV: * ':
            res.setdefault('magazine covers', []).append(x[6:].strip())
        elif x4 == 'NK: ':
            res.setdefault('nick names', []).append(normalizeName(x[4:]))
        elif x6 == 'PI: * ':
            res.setdefault('portrayed', []).append(x[6:].strip())
        elif x6 == 'SA: * ':
            sal = x[6:].strip().replace(' -> ', '::')
            res.setdefault('salary history', []).append(sal)
    # Multi-line sections, each gathered with _parseList.
    trl = _parseList(biol, 'TR')
    if trl: res['trivia'] = trl
    quotes = _parseList(biol, 'QU')
    if quotes: res['quotes'] = quotes
    otherworks = _parseList(biol, 'OW')
    if otherworks: res['other works'] = otherworks
    books = _parseList(biol, 'BO')
    if books: res['books'] = books
    agent = _parseList(biol, 'AG')
    if agent: res['agent address'] = agent
    wherenow = _parseList(biol, 'WN')
    if wherenow: res['where now'] = wherenow[0]
    biomovies = _parseList(biol, 'BT')
    if biomovies: res['biographical movies'] = biomovies
    guestapp = _buildGuests([x[6:].strip() for x in biol if x[:6] == 'GA: * '])
    if guestapp: res['notable tv guest appearances'] = guestapp
    tm = _parseList(biol, 'TM')
    if tm: res['trademarks'] = tm
    interv = _parseList(biol, 'IT')
    if interv: res['interviews'] = interv
    return res
def getBio(personID, indexF, dataF):
    """Get biography information for the given person.

    Looks up the person's offset in the index file, then reads the raw
    biography lines from the data file up to the next 'NM: ' record and
    hands them to _parseBiography.  Returns {} when the person has no
    biography entry.
    """
    bioidx = getFullIndex(indexF, personID)
    if bioidx is None: return {}
    try:
        fbio = open(dataF, 'r')
    except IOError, e:
        raise IMDbDataAccessError, str(e)
    fbio.seek(bioidx)
    # Skip the 'NM: ' header line of this entry.
    fbio.readline()
    rlines = []
    while 1:
        # latin2utf: the data files are latin_1 encoded.
        line = latin2utf(fbio.readline())
        # EOF or the start of the next person's record ends this entry.
        if not line or line[:4] == 'NM: ': break
        rlines.append(line)
    fbio.close()
    return _parseBiography(rlines)
def getFilmography(dataF, indexF, keyF, attrIF, attrKF, offset,
                    charNF=None, doCast=0, doWriters=0):
    """Gather information from the given files about the
    person entry found at offset; return a list of Movie objects,
    with the relevant attributes."""
    name, res = getRawData(dataF, offset, doCast, doWriters)
    resList = []
    for movie in res:
        # Resolve the movie title from its numeric ID; skip unresolvable ones.
        title = getLabel(movie['movieID'], indexF, keyF)
        if not title: continue
        curRole = movie.get('currentRole', u'')
        roleID = None
        # Map the textual role to character IDs when a names file is given.
        if curRole and charNF:
            curRole, roleID = getCharactersIDs(curRole, charNF)
        m = Movie(title=title, movieID=movie['movieID'],
                    currentRole=curRole, roleID=roleID,
                    accessSystem='local')
        if movie.has_key('attributeID'):
            # Attributes (e.g. '(uncredited)') become the Movie's notes.
            attr = getLabel(movie['attributeID'], attrIF, attrKF)
            if attr: m.notes = attr
        resList.append(m)
    return resList
def getAkaNames(personID, akaDF, namesIF, namesKF):
    """Return a list of aka names for the given person.

    Each index entry is resolved to its textual name; entries whose
    name cannot be resolved are silently skipped.
    """
    entries = getFullIndex(akaDF, personID, kind='akandb',
                           rindex=None, multi=1, default=[])
    labels = [getLabel(entry[1], namesIF, namesKF) for entry in entries]
    return [name for name in labels if name]
| mit |
openSUSE/docmanager | src/docmanager/cli/cmd_analyze.py | 1 | 1621 | #
# Copyright (c) 2015 SUSE Linux GmbH
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
def analyze_subcmd(subparsers, queryformat, filters, sort, quiet, stop_on_error, default_output, filesargs):
    """Create the 'analyze' subcommand
    :param subparsers: Subparser for all subcommands
    :param queryformat: The queryformat
    :param filters: kwargs for the -f/--filter option
    :param sort: kwargs for the -s/--sort option
    :param quiet: kwargs for the -q/--quiet flag
    :param stop_on_error: kwargs for the --stop-on-error flag
    :param default_output: kwargs for the -do/--default-output flag
    :param dict filesargs: Dict for FILE argument
    """
    panalyze = subparsers.add_parser('analyze',
                                     aliases=['a'],
                                     help='Analyzes the given XML files.'
                                     )
    # Register the options in a fixed order; each spec is
    # (argument flags, argparse keyword arguments).
    option_specs = (
        (('-qf', '--queryformat'), queryformat),
        (('-f', '--filter'), filters),
        (('-s', '--sort'), sort),
        (('--stop-on-error',), stop_on_error),
        (('-q', '--quiet'), quiet),
        (('-do', '--default-output'), default_output),
        (('files',), filesargs),
    )
    for flags, kwargs in option_specs:
        panalyze.add_argument(*flags, **kwargs)
| gpl-3.0 |
amirkdv/biseqt | tests/test_pw.py | 1 | 11185 | # -*- coding: utf-8 -*-
import pytest
from biseqt.sequence import Alphabet, Sequence
from biseqt.stochastics import rand_seq, MutationProcess
from biseqt.pw import Alignment, Aligner
from biseqt.pw import STD_MODE, BANDED_MODE
from biseqt.pw import GLOBAL, LOCAL, OVERLAP, B_OVERLAP
def test_projected_aln_len():
    """projected_len counts M/S plus D on origin, M/S plus I on mutant."""
    cases = [
        ('MMM', 'origin', 3),
        ('MMM', 'mutant', 3),
        ('SMS', 'origin', 3),
        ('SMS', 'mutant', 3),
        ('DMS', 'origin', 3),
        ('DMS', 'mutant', 2),
        ('IMS', 'origin', 2),
        ('IMS', 'mutant', 3),
    ]
    for transcript, side, expected in cases:
        assert Alignment.projected_len(transcript, on=side) == expected
@pytest.mark.parametrize('alphabet',
                         [Alphabet('ACGT'), Alphabet(['00', '01'])],
                         ids=['one letter alphabet', 'two letter alphabet'])
def test_alignment_std_basic(alphabet):
    # Smoke tests for standard-mode alignment: input validation, global,
    # local, and overlap alignment on trivially constructed sequences.
    # NOTE(review): "len(S) / 2" suggests Python 2 integer division; under
    # Python 3 this would produce a float index -- confirm target version.
    S = alphabet.parse(alphabet[0] * 10)
    with pytest.raises(AssertionError):
        Alignment(S, S, 'MSSST')  # illegal character
    with pytest.raises(AssertionError):
        Alignment(S, S, 'M', origin_start=len(S))  # illegal starting point
    with pytest.raises(AssertionError):
        Alignment(S, S, 'MM', origin_start=len(S)-1)  # transcript too long
    with Aligner(S, S) as aligner:
        aligner.solve()
        assert aligner.traceback().transcript == 'M' * len(S), \
            'default mode is standard global alignment'
        scores = aligner.table_scores()
        assert len(scores) == len(S) and \
            all([len(row) == len(S) for row in scores]), \
            'scores array should have the correct shape'
        assert max(max(row) for row in scores) == scores[-1][-1], \
            'max score shouldbe at the bottom right corner'
    with Aligner(S, S[:len(S) / 2]) as aligner:
        aligner.solve()
        alignment = aligner.traceback()
        assert alignment.transcript.count('D') == len(S) / 2, \
            'basic global alignment with gaps works'
        assert '-' * (len(S) / 2) in str(alignment), \
            'alignments have proper string representations'
    # Surround S with junk made from a different letter so local/overlap
    # alignments must pick out the shared S region.
    junk = alphabet.parse(alphabet[1] * len(S))
    origin, mutant = S + junk, junk + S
    alignment = Alignment(origin, mutant, 'M' * len(S), mutant_start=len(S))
    with Aligner(origin, mutant, alntype=LOCAL) as aligner:
        aligner.solve()
        assert alignment == aligner.traceback(), \
            'basic local alignment works'
    with Aligner(S, junk, alntype=LOCAL) as aligner:
        assert aligner.solve() is None and aligner.traceback() is None, \
            'alignment not found works'
    with Aligner(origin, mutant, alntype=OVERLAP) as aligner:
        aligner.solve()
        assert aligner.traceback().transcript == 'M' * len(S), \
            'basic overlap alignment works'
@pytest.mark.parametrize('alphabet',
                         [Alphabet('ACGT'), Alphabet(['00', '01'])],
                         ids=['one letter alphabet', 'two letter alphabet'])
def test_alignment_banded_basic(alphabet):
    # Banded-mode alignment: diag_range validation, global alignment on the
    # zero diagonal, and overlap alignment with a wide band.
    S = alphabet.parse(alphabet[0] * 10)
    with pytest.raises(AssertionError):
        Aligner(S, S, alnmode=BANDED_MODE, diag_range=(-len(S) - 1, 0))
    with pytest.raises(AssertionError):
        Aligner(S, S, alnmode=BANDED_MODE, diag_range=(0, len(S) + 1))
    with Aligner(S, S, alnmode=BANDED_MODE, diag_range=(0, 0)) as aligner:
        aligner.solve()
        assert aligner.traceback() == Alignment(S, S, 'M' * len(S)), \
            'basic global banded alignment works'
    junk = alphabet.parse(alphabet[1] * len(S))
    origin, mutant = S + junk, junk + S
    alignment = Alignment(origin, mutant, 'M' * len(S), mutant_start=len(S))
    with Aligner(origin, mutant, alnmode=BANDED_MODE, alntype=B_OVERLAP,
                 diag_range=(-2*len(S), 2*len(S)), ge_score=-1) as aligner:
        aligner.solve()
        assert alignment == aligner.traceback(), \
            'basic overlap banded alignment works'
def test_alignment_banded_memory():
    # With a single-diagonal band the DP table is O(L), so aligning two
    # megabase sequences must succeed where a quadratic table could not.
    A = Alphabet('ACGT')
    # pick sequences so large that cannot be aligned quadratically
    L = int(1e6)
    S = Sequence(A, (0,) * L)
    T = Sequence(A, (1,) * L)
    with Aligner(S, T, alnmode=BANDED_MODE, diag_range=(0, 0)) as aligner:
        aligner.solve()
        assert aligner.traceback().transcript == 'S' * L
# Mutation/noise rates shared by the parametrized global/local tests below.
noise_levels = [1e-2, 1e-1, 2e-1, 3e-1, 4e-1]
@pytest.mark.parametrize('err', noise_levels,
                         ids=['noise=%.1e' % l for l in noise_levels])
def test_alignment_std_global(err):
    # The optimal global alignment must score at least as well as the known
    # mutation transcript, and scores reported by the aligner must agree
    # with Alignment.calculate_score.
    A = Alphabet('ACGT')
    M = MutationProcess(A, subst_probs=err, go_prob=err, ge_prob=err)
    subst_scores, (go_score, ge_score) = M.log_odds_scores()
    S = rand_seq(A, 100)
    T, tx = M.mutate(S)
    mutation_aln = Alignment(S, T, tx)
    mutation_score = mutation_aln.calculate_score(subst_scores, go_score,
                                                  ge_score)
    aligner = Aligner(S, T, subst_scores=subst_scores, go_score=go_score,
                      ge_score=ge_score, alnmode=STD_MODE, alntype=GLOBAL)
    with aligner:
        reported_score = aligner.solve()
        assert round(reported_score, 3) >= round(mutation_score, 3), \
            'optimal alignment scores better than the known transcript'
        alignment = aligner.traceback()
        aln_score = alignment.calculate_score(subst_scores, go_score, ge_score)
        assert round(aln_score, 3) == round(reported_score, 3), \
            'The alignment score should be calculated correctly'
        aligner_score = aligner.calculate_score(alignment)
        assert round(aln_score, 3) == round(aligner_score, 3), \
            'Aligner.calculate_score behaves like Alignment.calculate_score'
        ori_len = Alignment.projected_len(alignment.transcript, on='origin')
        mut_len = Alignment.projected_len(alignment.transcript, on='mutant')
        assert ori_len == len(S) and mut_len == len(T), \
            'Global alignments cover the entirety of both sequences'
def test_pw_truncate_to_matches():
    # truncate_to_match must strip leading/trailing non-match ops and shift
    # both start coordinates accordingly.
    A = Alphabet('ACGT')
    S = A.parse('A' * 10 + 'T' * 10 + 'A' * 10)
    T = A.parse('T' * 30)
    tx = 'S' * 10 + 'M' * 10 + 'S' * 10
    aln = Alignment(S, T, tx)
    aln_truncated = aln.truncate_to_match()
    assert aln_truncated.transcript == 'M' * 10, \
        'truncated alignment for %s is %s' % (aln.transcript, 'M' * 10)
    assert aln_truncated.origin_start == 10 and\
        aln_truncated.mutant_start == 10, \
        'truncated alignment should start at (10, 10)'
@pytest.mark.parametrize('err', noise_levels,
                         ids=['noise=%.1e' % l for l in noise_levels])
def test_alignment_std_local(err):
    # Pad the mutated sequence with homopolymer runs on both sides; a local
    # alignment should recover the shared region and not span all of T.
    A = Alphabet('ACGT')
    M = MutationProcess(A, subst_probs=err, go_prob=err, ge_prob=err)
    subst_scores, (go_score, ge_score) = M.log_odds_scores()
    S = rand_seq(A, 100)
    T, tx = M.mutate(S)
    T = A.parse('A' * 100) + T + A.parse('G' * 100)
    mutation_aln = Alignment(S, T, tx)
    mutation_score = mutation_aln.calculate_score(subst_scores, go_score,
                                                  ge_score)
    aligner = Aligner(S, T, subst_scores=subst_scores, go_score=go_score,
                      ge_score=ge_score, alnmode=STD_MODE, alntype=LOCAL)
    with aligner:
        reported_score = aligner.solve()
        assert round(reported_score, 3) >= round(mutation_score, 3), \
            'optimal alignment scores better than the known transcript'
        alignment = aligner.traceback()
        aln_score = alignment.calculate_score(subst_scores, go_score, ge_score)
        assert round(aln_score, 3) == round(reported_score, 3), \
            'The alignment score should be calculated correctly'
        ori_len = Alignment.projected_len(alignment.transcript, on='origin')
        mut_len = Alignment.projected_len(alignment.transcript, on='mutant')
        assert ori_len <= len(S) and mut_len < len(T), \
            'Local alignments do not cover the entirety of both sequences'
def test_pw_render_basic():
    # Terminal rendering: color toggling, margin handling, and the '-'
    # placeholders used for deletions (mutant side) and insertions (origin
    # side).
    A = Alphabet('ACGT')
    S = A.parse('AACT')
    aln = Alignment(S, S, 'M' * len(S))
    assert aln.render_term(colored=False).count('\033') == 0, \
        'colored output should allow being turned off'
    assert aln.render_term(colored=True).count('\033') > 0, \
        'colored output should allow being turned on'
    # validate input
    with pytest.raises(AssertionError):
        aln.render_term(margin=-1)
    with pytest.raises(AssertionError):
        aln.render_term(term_width=5)
    aln = Alignment(S + S, S + S, 'M' * len(S), origin_start=len(S))
    no_margin = aln.render_term(margin=0, colored=False)
    assert '[%d]' % len(S) in no_margin, 'margin should allow being turned off'
    with_margin = aln.render_term(margin=1, colored=False)
    assert '[%d]' % (len(S) - 1) in with_margin, \
        'margin should allow being turned on'
    # shouldn't choke on too large margins
    full_margin = aln.render_term(margin=30, colored=False)
    assert str(S) + '.' * len(S) in full_margin, 'overhanging margins work'
    assert len(set(len(l) for l in full_margin.rstrip().split('\n'))) == 1, \
        'both lines of the output should have the same length'
    # deletion:
    #   AACT
    #   AG-T
    aln = Alignment(S + S, A.parse('AGT'), 'MSDM', origin_start=len(S))
    with_del = aln.render_term(colored=False)
    assert 'AG-T' in with_del, 'deletions are represented by - in mutant'
    lines = with_del.rstrip().split('\n')
    assert lines[0].index('C') == lines[1].index('-'), \
        'deleted content and - should be aligned'
    # shouldn't crash when printing deletions with color
    aln.render_term(colored=True)
    # insertion:
    #   AAC-T
    #   AACGT
    aln = Alignment(S + S, A.parse('AACGT'), 'MMMIM', origin_start=len(S))
    with_ins = aln.render_term(colored=False)
    assert 'AAC-T' in with_ins, 'insertions are represented by - in origin'
    lines = with_ins.rstrip().split('\n')
    assert lines[0].index('-') == lines[1].index('G'), \
        'inserted content and - should be aligned'
    # shouldn't crash when printing with color
    with_ins = aln.render_term(colored=True)
def test_pw_render_width():
    """Rendered alignment lines must respect the requested terminal width.

    Builds an alignment longer than the terminal, renders it with an
    oversized margin, and checks every output line fits within (and at
    least one line fully uses) the requested width.
    """
    A = Alphabet('ACGT')
    N = 100
    S = A.parse('A' * (2 * N))
    # Use floor division: plain N / 2 yields a float under Python 3,
    # whereas a terminal width must be an integer (value unchanged: 50).
    tx, term_width = 'M' * N, N // 2
    aln = Alignment(S, S, tx, origin_start=N)
    render = aln.render_term(margin=2*N, colored=False, term_width=term_width)
    line_lens = [len(l) for l in render.rstrip().split('\n')]
    assert all(length <= term_width for length in line_lens), \
        'terminal width should be adjustable'
    assert any(length == term_width for length in line_lens), \
        'terminal width should be fully used'
    # At most two distinct line lengths: full-width wrapped lines plus the
    # (possibly shorter) final fragment.
    assert len(set(line_lens)) <= 2, \
        'alignments longer than terminal width should work'
def test_pw_render_longlet():
    """Rendering must handle alphabets whose letters span >1 character."""
    alphabet = Alphabet(['00', '11'])
    alignment = Alignment(alphabet.parse('0011'), alphabet.parse('11'), 'DM')
    rendered = alignment.render_term(colored=False)
    assert '--11' in rendered, \
        'alphabets with > 1 long letters should be rendered properly'
| bsd-3-clause |
bswartz/cinder | cinder/tests/unit/api/v2/test_volumes.py | 1 | 75944 | # Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mock
from oslo_config import cfg
import six
from six.moves import range
from six.moves import urllib
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.v2 import volumes
from cinder import consistencygroup as consistencygroupAPI
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import utils
from cinder.volume import api as volume_api
CONF = cfg.CONF
NS = '{http://docs.openstack.org/api/openstack-block-storage/2.0/content}'
DEFAULT_AZ = "zone1:host1"
class VolumeApiTest(test.TestCase):
    def setUp(self):
        """Build a VolumeController backed by stubbed volume/db APIs.

        Stubs the image service, volume listing/deletion, and the
        service table so tests never touch real backends; also creates
        an admin request context used by DB-backed tests.
        """
        super(VolumeApiTest, self).setUp()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        fake_image.stub_out_image_service(self.stubs)
        self.controller = volumes.VolumeController(self.ext_mgr)
        self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
        self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
        self.patch(
            'cinder.db.service_get_all', autospec=True,
            return_value=stubs.stub_service_get_all_by_topic(None, None))
        # Show full diffs when large expected/actual dicts mismatch.
        self.maxDiff = None
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_create(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
vol = self._vol_in_request_body()
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
ex = self._expected_vol_from_controller()
self.assertEqual(ex, res_dict)
self.assertTrue(mock_validate.called)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_create_with_type(self, mock_validate):
        """Create with a volume type: bad name 404s, name and id both work."""
        vol_type = db.volume_type_create(
            context.get_admin_context(),
            dict(name=CONF.default_volume_type, extra_specs={})
        )
        db_vol_type = db.volume_type_get(context.get_admin_context(),
                                         vol_type.id)
        vol = self._vol_in_request_body(volume_type="FakeTypeName")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        # Raise 404 when type name isn't valid
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)
        # Use correct volume type name
        vol.update(dict(volume_type=CONF.default_volume_type))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        # NOTE(review): this volume_id is immediately overwritten below and
        # never read; len(res_dict) == 1 only checks the 'volume' wrapper key.
        volume_id = res_dict['volume']['id']
        self.assertEqual(1, len(res_dict))
        # Use correct volume type id
        vol.update(dict(volume_type=db_vol_type['id']))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        volume_id = res_dict['volume']['id']
        self.assertEqual(1, len(res_dict))
        vol_db = stubs.stub_volume(volume_id, volume_type={'name': vol_type})
        vol_obj = fake_volume.fake_volume_obj(context.get_admin_context(),
                                              **vol_db)
        self.stubs.Set(volume_api.API, 'get_all',
                       lambda *args, **kwargs:
                       objects.VolumeList(objects=[vol_obj]))
        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches the real get method.
        db.sqlalchemy.api._GET_METHODS = {}
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        req = fakes.HTTPRequest.blank('/v2/volumes/detail')
        # NOTE(review): detail() result is not asserted; the call only
        # verifies listing does not blow up with the stubbed type.
        res_dict = self.controller.detail(req)
        self.assertTrue(mock_validate.called)
    def _vol_in_request_body(self,
                             size=stubs.DEFAULT_VOL_SIZE,
                             name=stubs.DEFAULT_VOL_NAME,
                             description=stubs.DEFAULT_VOL_DESCRIPTION,
                             availability_zone=DEFAULT_AZ,
                             snapshot_id=None,
                             source_volid=None,
                             source_replica=None,
                             consistencygroup_id=None,
                             volume_type=None,
                             image_ref=None,
                             image_id=None):
        """Build the 'volume' dict for a create/update request body.

        Defaults come from the shared stubs module; image_id takes
        precedence over image_ref when both are given (image_ref is sent
        under the camelCase 'imageRef' key the API expects).
        """
        vol = {"size": size,
               "name": name,
               "description": description,
               "availability_zone": availability_zone,
               "snapshot_id": snapshot_id,
               "source_volid": source_volid,
               "source_replica": source_replica,
               "consistencygroup_id": consistencygroup_id,
               "volume_type": volume_type,
               }
        if image_id is not None:
            vol['image_id'] = image_id
        elif image_ref is not None:
            vol['imageRef'] = image_ref
        return vol
    def _expected_vol_from_controller(
            self,
            size=stubs.DEFAULT_VOL_SIZE,
            availability_zone=DEFAULT_AZ,
            description=stubs.DEFAULT_VOL_DESCRIPTION,
            name=stubs.DEFAULT_VOL_NAME,
            consistencygroup_id=None,
            source_volid=None,
            snapshot_id=None,
            metadata=None,
            attachments=None,
            volume_type=stubs.DEFAULT_VOL_TYPE,
            status=stubs.DEFAULT_VOL_STATUS,
            with_migration_status=False):
        """Build the full response dict the controller is expected to return.

        Mirrors the v2 view builder output (links, timestamps, flags) for a
        volume created from the stub defaults; keyword args override the
        fields individual tests care about. migration_status only appears
        for admin callers, hence the with_migration_status switch.
        """
        metadata = metadata or {}
        attachments = attachments or []
        volume = {'volume':
                  {'attachments': attachments,
                   'availability_zone': availability_zone,
                   'bootable': 'false',
                   'consistencygroup_id': consistencygroup_id,
                   'created_at': datetime.datetime(
                       1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()),
                   'updated_at': datetime.datetime(
                       1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()),
                   'description': description,
                   'id': stubs.DEFAULT_VOL_ID,
                   'links':
                       [{'href': 'http://localhost/v2/%s/volumes/%s' % (
                           fake.PROJECT_ID, fake.VOLUME_ID),
                         'rel': 'self'},
                        {'href': 'http://localhost/%s/volumes/%s' % (
                            fake.PROJECT_ID, fake.VOLUME_ID),
                         'rel': 'bookmark'}],
                   'metadata': metadata,
                   'name': name,
                   'replication_status': 'disabled',
                   'multiattach': False,
                   'size': size,
                   'snapshot_id': snapshot_id,
                   'source_volid': source_volid,
                   'status': status,
                   'user_id': fake.USER_ID,
                   'volume_type': volume_type,
                   'encrypted': False}}
        if with_migration_status:
            volume['volume']['migration_status'] = None
        return volume
    def _expected_volume_api_create_kwargs(self, snapshot=None,
                                           availability_zone=DEFAULT_AZ,
                                           source_volume=None):
        """Return the kwargs volume_api.API.create should be called with."""
        return {'metadata': None,
                'snapshot': snapshot,
                'source_volume': source_volume,
                'source_replica': None,
                'consistencygroup': None,
                'availability_zone': availability_zone,
                'scheduler_hints': None,
                'multiattach': False,
                }
    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
                       autospec=True)
    @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True)
    @mock.patch.object(volume_api.API, 'create', autospec=True)
    def test_volume_creation_from_snapshot(self, create, get_snapshot,
                                           volume_type_get):
        """Create from snapshot resolves the snapshot and forwards it."""
        create.side_effect = stubs.stub_volume_api_create
        get_snapshot.side_effect = stubs.stub_snapshot_get
        volume_type_get.side_effect = stubs.stub_volume_type_get
        snapshot_id = fake.SNAPSHOT_ID
        vol = self._vol_in_request_body(snapshot_id=snapshot_id)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.create(req, body)
        ex = self._expected_vol_from_controller(snapshot_id=snapshot_id)
        self.assertEqual(ex, res_dict)
        context = req.environ['cinder.context']
        # autospec mocks receive the bound instance as the first argument.
        get_snapshot.assert_called_once_with(self.controller.volume_api,
                                             context, snapshot_id)
        kwargs = self._expected_volume_api_create_kwargs(
            stubs.stub_snapshot(snapshot_id))
        create.assert_called_once_with(self.controller.volume_api, context,
                                      vol['size'], stubs.DEFAULT_VOL_NAME,
                                      stubs.DEFAULT_VOL_DESCRIPTION, **kwargs)
    @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True)
    def test_volume_creation_fails_with_invalid_snapshot(self, get_snapshot):
        """An unknown snapshot id must produce HTTP 404."""
        get_snapshot.side_effect = stubs.stub_snapshot_get
        snapshot_id = fake.WILL_NOT_BE_FOUND_ID
        vol = self._vol_in_request_body(snapshot_id=snapshot_id)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        # Raise 404 when snapshot cannot be found.
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)
        context = req.environ['cinder.context']
        get_snapshot.assert_called_once_with(self.controller.volume_api,
                                             context, snapshot_id)
    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
                       autospec=True)
    @mock.patch.object(volume_api.API, 'get_volume', autospec=True)
    @mock.patch.object(volume_api.API, 'create', autospec=True)
    def test_volume_creation_from_source_volume(self, create, get_volume,
                                                volume_type_get):
        """Create from source volume resolves and forwards the source."""
        get_volume.side_effect = stubs.stub_volume_api_get
        create.side_effect = stubs.stub_volume_api_create
        volume_type_get.side_effect = stubs.stub_volume_type_get
        source_volid = '2f49aa3a-6aae-488d-8b99-a43271605af6'
        vol = self._vol_in_request_body(source_volid=source_volid)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.create(req, body)
        ex = self._expected_vol_from_controller(source_volid=source_volid)
        self.assertEqual(ex, res_dict)
        context = req.environ['cinder.context']
        get_volume.assert_called_once_with(self.controller.volume_api,
                                           context, source_volid)
        # The source is passed to create() as a Volume object, not a dict.
        db_vol = stubs.stub_volume(source_volid)
        vol_obj = fake_volume.fake_volume_obj(context, **db_vol)
        kwargs = self._expected_volume_api_create_kwargs(
            source_volume=vol_obj)
        create.assert_called_once_with(self.controller.volume_api, context,
                                      vol['size'], stubs.DEFAULT_VOL_NAME,
                                      stubs.DEFAULT_VOL_DESCRIPTION, **kwargs)
    @mock.patch.object(volume_api.API, 'get_volume', autospec=True)
    def test_volume_creation_fails_with_invalid_source_volume(self,
                                                              get_volume):
        """An unknown source volume id must produce HTTP 404."""
        get_volume.side_effect = stubs.stub_volume_get_notfound
        source_volid = fake.VOLUME_ID
        vol = self._vol_in_request_body(source_volid=source_volid)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        # Raise 404 when source volume cannot be found.
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)
        context = req.environ['cinder.context']
        get_volume.assert_called_once_with(self.controller.volume_api,
                                           context, source_volid)
    @mock.patch.object(volume_api.API, 'get_volume', autospec=True)
    def test_volume_creation_fails_with_invalid_source_replica(self,
                                                               get_volume):
        """An unknown source replica id must produce HTTP 404."""
        get_volume.side_effect = stubs.stub_volume_get_notfound
        source_replica = fake.VOLUME_ID
        vol = self._vol_in_request_body(source_replica=source_replica)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        # Raise 404 when source replica cannot be found.
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)
        context = req.environ['cinder.context']
        get_volume.assert_called_once_with(self.controller.volume_api,
                                           context, source_replica)
    @mock.patch.object(volume_api.API, 'get_volume', autospec=True)
    def test_volume_creation_fails_with_invalid_source_replication_status(
            self, get_volume):
        """A replica source whose replication is disabled yields HTTP 400."""
        # stub_volume_get returns a volume with replication disabled.
        get_volume.side_effect = stubs.stub_volume_get
        source_replica = '2f49aa3a-6aae-488d-8b99-a43271605af6'
        vol = self._vol_in_request_body(source_replica=source_replica)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        # Raise 400 when replication status is disabled.
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, body)
        context = req.environ['cinder.context']
        get_volume.assert_called_once_with(self.controller.volume_api,
                                           context, source_replica)
    @mock.patch.object(consistencygroupAPI.API, 'get', autospec=True)
    def test_volume_creation_fails_with_invalid_consistency_group(self,
                                                                  get_cg):
        """An unknown consistency group id must produce HTTP 404."""
        get_cg.side_effect = stubs.stub_consistencygroup_get_notfound
        consistencygroup_id = '4f49aa3a-6aae-488d-8b99-a43271605af6'
        vol = self._vol_in_request_body(
            consistencygroup_id=consistencygroup_id)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        # Raise 404 when consistency group is not found.
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)
        context = req.environ['cinder.context']
        get_cg.assert_called_once_with(self.controller.consistencygroup_api,
                                       context, consistencygroup_id)
def test_volume_creation_fails_with_bad_size(self):
vol = self._vol_in_request_body(size="")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req,
body)
def test_volume_creation_fails_with_bad_availability_zone(self):
vol = self._vol_in_request_body(availability_zone="zonen:hostn")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req, body)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_create_with_image_ref(self, mock_validate):
        """Create with a UUID imageRef succeeds with the image extension."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(
            availability_zone="nova",
            image_ref="c905cedb-7281-47e4-8a62-f26bc5fc4c77")
        ex = self._expected_vol_from_controller(availability_zone="nova")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(ex, res_dict)
        self.assertTrue(mock_validate.called)
    def test_volume_create_with_image_ref_is_integer(self):
        """A non-string imageRef (integer) must be rejected with 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="cinder",
                                        image_ref=1234)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_ref_not_uuid_format(self):
        """A non-UUID imageRef that matches no image name yields 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.stubs.Set(fake_image._FakeImageService,
                       "detail",
                       stubs.stub_image_service_detail)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="cinder",
                                        image_ref="12345")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_ref_with_empty_string(self):
        """An empty imageRef must be rejected with 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.stubs.Set(fake_image._FakeImageService,
                       "detail",
                       stubs.stub_image_service_detail)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="cinder",
                                        image_ref="")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_create_with_image_id(self, mock_validate):
        """Create with a UUID image_id succeeds with the image extension."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(
            availability_zone="nova",
            image_id="c905cedb-7281-47e4-8a62-f26bc5fc4c77")
        ex = self._expected_vol_from_controller(availability_zone="nova")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(ex, res_dict)
        self.assertTrue(mock_validate.called)
    def test_volume_create_with_image_id_is_integer(self):
        """A non-string image_id (integer) must be rejected with 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="cinder",
                                        image_id=1234)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_id_not_uuid_format(self):
        """A non-UUID image_id that matches no image name yields 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.stubs.Set(fake_image._FakeImageService,
                       "detail",
                       stubs.stub_image_service_detail)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="cinder",
                                        image_id="12345")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_id_with_empty_string(self):
        """An empty image_id must be rejected with 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.stubs.Set(fake_image._FakeImageService,
                       "detail",
                       stubs.stub_image_service_detail)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="cinder",
                                        image_id="")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_create_with_image_name(self, mock_validate):
        """A unique image name in imageRef is resolved to its image."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        self.stubs.Set(fake_image._FakeImageService,
                       "detail",
                       stubs.stub_image_service_detail)
        test_id = "Fedora-x86_64-20-20140618-sda"
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="nova",
                                        image_ref=test_id)
        ex = self._expected_vol_from_controller(availability_zone="nova")
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(ex, res_dict)
        self.assertTrue(mock_validate.called)
    def test_volume_create_with_image_name_has_multiple(self):
        """An ambiguous image name (multiple matches) yields HTTP 409."""
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.stubs.Set(fake_image._FakeImageService,
                       "detail",
                       stubs.stub_image_service_detail)
        test_id = "multi"
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="nova",
                                        image_ref=test_id)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_name_no_match(self):
        """An image name matching nothing yields HTTP 400."""
        self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.stubs.Set(fake_image._FakeImageService,
                       "detail",
                       stubs.stub_image_service_detail)
        test_id = "MissingName"
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = self._vol_in_request_body(availability_zone="nova",
                                        image_ref=test_id)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_update(self, mock_validate):
        """Updating 'name' returns the new name and emits two notifications."""
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        updates = {
            "name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        expected = self._expected_vol_from_controller(
            availability_zone=stubs.DEFAULT_AZ, name="Updated Test Name",
            metadata={'attached_mode': 'rw', 'readonly': 'False'})
        self.assertEqual(expected, res_dict)
        # update.start + update.end
        self.assertEqual(2, len(self.notifier.notifications))
        self.assertTrue(mock_validate.called)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_update_deprecation(self, mock_validate):
        """Deprecated display_name/display_description keys still update."""
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        updates = {
            "display_name": "Updated Test Name",
            "display_description": "Updated Test Description",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        expected = self._expected_vol_from_controller(
            availability_zone=stubs.DEFAULT_AZ, name="Updated Test Name",
            description="Updated Test Description",
            metadata={'attached_mode': 'rw', 'readonly': 'False'})
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))
        self.assertTrue(mock_validate.called)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_update_deprecation_key_priority(self, mock_validate):
        """Test current update keys have priority over deprecated keys."""
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        # 'name'/'description' must win over display_name/display_description.
        updates = {
            "name": "New Name",
            "description": "New Description",
            "display_name": "Not Shown Name",
            "display_description": "Not Shown Description",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        expected = self._expected_vol_from_controller(
            availability_zone=stubs.DEFAULT_AZ,
            name="New Name", description="New Description",
            metadata={'attached_mode': 'rw', 'readonly': 'False'})
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))
        self.assertTrue(mock_validate.called)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_update_metadata(self, mock_validate):
        """Metadata in an update is merged and stringified in the response."""
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        updates = {
            "metadata": {"qos_max_iops": 2000}
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        # Note: the int 2000 comes back as the string '2000'.
        expected = self._expected_vol_from_controller(
            availability_zone=stubs.DEFAULT_AZ,
            metadata={'attached_mode': 'rw', 'readonly': 'False',
                      'qos_max_iops': '2000'})
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))
        self.assertTrue(mock_validate.called)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update_metadata_value_too_long(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
updates = {
"metadata": {"key1": ("a" * 260)}
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertEqual(0, len(self.notifier.notifications))
self.assertRaises(exc.HTTPRequestEntityTooLarge,
self.controller.update, req, fake.VOLUME_ID, body)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update_metadata_key_too_long(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
updates = {
"metadata": {("a" * 260): "value1"}
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertEqual(0, len(self.notifier.notifications))
self.assertRaises(exc.HTTPRequestEntityTooLarge,
self.controller.update, req, fake.VOLUME_ID, body)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update_metadata_empty_key(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
updates = {
"metadata": {"": "value1"}
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertEqual(0, len(self.notifier.notifications))
self.assertRaises(exc.HTTPBadRequest,
self.controller.update, req, fake.VOLUME_ID, body)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_volume_update_with_admin_metadata(self, mock_validate):
        """Admin callers see admin metadata (readonly) but not hidden keys.

        Creates a real DB volume with admin metadata and an attachment,
        then updates it as admin and checks the merged response, including
        attachment details and migration_status visibility.
        """
        self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
        volume = stubs.stub_volume(fake.VOLUME_ID)
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        # 'invisible_key' must never leak into the API response.
        db.volume_admin_metadata_update(context.get_admin_context(),
                                        fake.VOLUME_ID,
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': fake.VOLUME_ID, }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], fake.INSTANCE_ID, None, '/')
        attach_tmp = db.volume_attachment_get(context.get_admin_context(),
                                              attachment['id'])
        volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID)
        updates = {
            "name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        expected = self._expected_vol_from_controller(
            availability_zone=stubs.DEFAULT_AZ, volume_type=None,
            status='in-use', name='Updated Test Name',
            attachments=[{'id': fake.VOLUME_ID,
                          'attachment_id': attachment['id'],
                          'volume_id': stubs.DEFAULT_VOL_ID,
                          'server_id': fake.INSTANCE_ID,
                          'host_name': None,
                          'device': '/',
                          'attached_at': attach_tmp['attach_time'].replace(
                              tzinfo=iso8601.iso8601.Utc()),
                          }],
            metadata={'key': 'value', 'readonly': 'True'},
            with_migration_status=True)
        expected['volume']['updated_at'] = volume_tmp['updated_at'].replace(
            tzinfo=iso8601.iso8601.Utc())
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))
        self.assertTrue(mock_validate.called)
def test_update_empty_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, fake.VOLUME_ID, body)
def test_update_invalid_body(self):
body = {
'name': 'missing top level volume key'
}
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, fake.VOLUME_ID, body)
def test_update_not_found(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
updates = {
"name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
req, fake.VOLUME_ID, body)
    def test_volume_list_summary(self):
        """GET /volumes returns the summary view (name, id, links only)."""
        self.stubs.Set(volume_api.API, 'get_all',
                       stubs.stub_volume_api_get_all_by_project)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.index(req)
        expected = {
            'volumes': [
                {
                    'name': stubs.DEFAULT_VOL_NAME,
                    'id': fake.VOLUME_ID,
                    'links': [
                        {
                            'href': 'http://localhost/v2/%s/volumes/%s' % (
                                    fake.PROJECT_ID, fake.VOLUME_ID),
                            'rel': 'self'
                        },
                        {
                            'href': 'http://localhost/%s/volumes/%s' % (
                                    fake.PROJECT_ID, fake.VOLUME_ID),
                            'rel': 'bookmark'
                        }
                    ],
                }
            ]
        }
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))
    def test_volume_list_detail(self):
        """GET /volumes/detail returns full volume representations."""
        self.stubs.Set(volume_api.API, 'get_all',
                       stubs.stub_volume_api_get_all_by_project)
        self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
                       stubs.stub_volume_type_get)
        req = fakes.HTTPRequest.blank('/v2/volumes/detail')
        res_dict = self.controller.detail(req)
        exp_vol = self._expected_vol_from_controller(
            availability_zone=stubs.DEFAULT_AZ,
            metadata={'attached_mode': 'rw', 'readonly': 'False'})
        expected = {'volumes': [exp_vol['volume']]}
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))
    def test_volume_list_detail_with_admin_metadata(self):
        """Detail listing as admin includes admin metadata and attachments.

        Same fixture as test_volume_update_with_admin_metadata but via the
        detail listing path; 'invisible_key' must not appear in the output.
        """
        volume = stubs.stub_volume(fake.VOLUME_ID)
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(),
                                        fake.VOLUME_ID,
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': fake.VOLUME_ID, }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], fake.INSTANCE_ID, None, '/')
        attach_tmp = db.volume_attachment_get(context.get_admin_context(),
                                              attachment['id'])
        volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID)
        req = fakes.HTTPRequest.blank('/v2/volumes/detail')
        admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.detail(req)
        exp_vol = self._expected_vol_from_controller(
            availability_zone=stubs.DEFAULT_AZ,
            status="in-use", volume_type=None,
            attachments=[{'attachment_id': attachment['id'],
                          'device': '/',
                          'server_id': fake.INSTANCE_ID,
                          'host_name': None,
                          'id': fake.VOLUME_ID,
                          'volume_id': stubs.DEFAULT_VOL_ID,
                          'attached_at': attach_tmp['attach_time'].replace(
                              tzinfo=iso8601.iso8601.Utc()),
                          }],
            metadata={'key': 'value', 'readonly': 'True'},
            with_migration_status=True)
        exp_vol['volume']['updated_at'] = volume_tmp['updated_at'].replace(
            tzinfo=iso8601.iso8601.Utc())
        expected = {'volumes': [exp_vol['volume']]}
        self.assertEqual(expected, res_dict)
    def test_volume_index_with_marker(self):
        """The marker query parameter is accepted by the index call."""
        # Local stub must mirror db.volume_get_all_by_project's signature.
        def stub_volume_get_all_by_project(context, project_id, marker, limit,
                                           sort_keys=None, sort_dirs=None,
                                           filters=None,
                                           viewable_admin_meta=False,
                                           offset=0):
            return [
                stubs.stub_volume(fake.VOLUME_ID, display_name='vol1'),
                stubs.stub_volume(fake.VOLUME2_ID, display_name='vol2'),
            ]
        self.stubs.Set(db, 'volume_get_all_by_project',
                       stub_volume_get_all_by_project)
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
        req = fakes.HTTPRequest.blank('/v2/volumes?marker=1')
        res_dict = self.controller.index(req)
        volumes = res_dict['volumes']
        self.assertEqual(2, len(volumes))
        self.assertEqual(fake.VOLUME_ID, volumes[0]['id'])
        self.assertEqual(fake.VOLUME2_ID, volumes[1]['id'])
def test_volume_index_limit(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes'
'?limit=1&name=foo'
'&sort=id1:asc')
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
# Ensure that the next link is correctly formatted, it should
# contain the same limit, filter, and sort information as the
# original request as well as a marker; this ensures that the
# caller can simply use the "next" link and that they do not
# need to manually insert the limit and sort information.
links = res_dict['volumes_links']
self.assertEqual('next', links[0]['rel'])
href_parts = urllib.parse.urlparse(links[0]['href'])
self.assertEqual('/v2/%s/volumes' % fake.PROJECT_ID, href_parts.path)
params = urllib.parse.parse_qs(href_parts.query)
self.assertEqual(str(volumes[0]['id']), params['marker'][0])
self.assertEqual('1', params['limit'][0])
self.assertEqual('foo', params['name'][0])
self.assertEqual('id1:asc', params['sort'][0])
def test_volume_index_limit_negative(self):
req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
def test_volume_index_limit_non_int(self):
req = fakes.HTTPRequest.blank('/v2/volumes?limit=a')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
def test_volume_index_limit_marker(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes?marker=1&limit=1')
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(fake.VOLUME_ID, volumes[0]['id'])
def _create_db_volumes(self, num_volumes):
volumes = [utils.create_volume(self.ctxt, display_name='vol%s' % i)
for i in range(num_volumes)]
for vol in volumes:
self.addCleanup(db.volume_destroy, self.ctxt, vol.id)
volumes.reverse()
return volumes
# offset pagination on the index view: a valid offset skips entries, while a
# negative, non-integer, or absurdly large offset yields 400 Bad Request.
def test_volume_index_limit_offset(self):
created_volumes = self._create_db_volumes(2)
req = fakes.HTTPRequest.blank('/v2/volumes?limit=2&offset=1')
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(created_volumes[1].id, volumes[0]['id'])
req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
req = fakes.HTTPRequest.blank('/v2/volumes?limit=a&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Test that we get an exception HTTPBadRequest(400) with an offset
# greater than the maximum offset value.
url = '/v2/volumes?limit=2&offset=43543564546567575'
req = fakes.HTTPRequest.blank(url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Same pagination checks as the index tests above, but for the detail view
# (/v2/volumes/detail). Marker is forwarded and both stubbed rows come back.
def test_volume_detail_with_marker(self):
def stub_volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
return [
stubs.stub_volume(fake.VOLUME_ID, display_name='vol1'),
stubs.stub_volume(fake.VOLUME2_ID, display_name='vol2'),
]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(2, len(volumes))
self.assertEqual(fake.VOLUME_ID, volumes[0]['id'])
self.assertEqual(fake.VOLUME2_ID, volumes[1]['id'])
# detail view with limit=1: one entry plus a well-formed "next" link that
# points at the detail path and includes a marker and the same limit.
def test_volume_detail_limit(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
# Ensure that the next link is correctly formatted
links = res_dict['volumes_links']
self.assertEqual('next', links[0]['rel'])
href_parts = urllib.parse.urlparse(links[0]['href'])
self.assertEqual('/v2/%s/volumes/detail' % fake.PROJECT_ID,
href_parts.path)
params = urllib.parse.parse_qs(href_parts.query)
self.assertIn('marker', params)
self.assertEqual('1', params['limit'][0])
# Negative limit on the detail view -> 400 Bad Request.
def test_volume_detail_limit_negative(self):
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
# Non-integer limit on the detail view -> 400 Bad Request.
def test_volume_detail_limit_non_int(self):
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
# marker + limit on the detail view: exactly one entry.
def test_volume_detail_limit_marker(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1&limit=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(fake.VOLUME_ID, volumes[0]['id'])
# offset pagination on the detail view, exercised both as a plain user and
# as admin; invalid offsets/limits yield 400 Bad Request.
def test_volume_detail_limit_offset(self):
created_volumes = self._create_db_volumes(2)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(created_volumes[1].id, volumes[0]['id'])
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1',
use_admin_context=True)
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(created_volumes[1].id, volumes[0]['id'])
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
url = '/v2/volumes/detail?limit=2&offset=4536546546546467'
req = fakes.HTTPRequest.blank(url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
# limit=0 is accepted and simply produces an empty volume list.
def test_volume_with_limit_zero(self):
def stub_volume_get_all(context, marker, limit, **kwargs):
return []
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all)
req = fakes.HTTPRequest.blank('/v2/volumes?limit=0')
res_dict = self.controller.index(req)
expected = {'volumes': []}
self.assertEqual(expected, res_dict)
# Helper: run an all_tenants index or detail request (selected by `detailed`)
# with osapi_max_limit overridden to `osapi_max_limit` and an optional
# per-request `limit`, then assert the number of returned volumes and whether
# a 'volumes_links' next link is present. The pagination helpers are wrapped
# so they pick up the overridden CONF value, and _generate_next_link is
# stubbed to a fixed sentinel since only its presence matters here.
def _validate_next_link(self, detailed, item_count, osapi_max_limit, limit,
should_link_exist):
keys_fns = (('volumes', self.controller.index),
('volumes/detail', self.controller.detail))
key, fn = keys_fns[detailed]
req_string = '/v2/%s?all_tenants=1' % key
if limit:
req_string += '&limit=%s' % limit
req = fakes.HTTPRequest.blank(req_string, use_admin_context=True)
link_return = [{"rel": "next", "href": "fake_link"}]
self.flags(osapi_max_limit=osapi_max_limit)
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
original_call=common.get_pagination_params):
return original_call(params, max_limit)
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
original_call=common._get_limit_param):
return original_call(params, max_limit)
with mock.patch.object(common, 'get_pagination_params',
get_pagination_params), \
mock.patch.object(common, '_get_limit_param',
_get_limit_param), \
mock.patch.object(common.ViewBuilder, '_generate_next_link',
return_value=link_return):
res_dict = fn(req)
self.assertEqual(item_count, len(res_dict['volumes']))
self.assertEqual(should_link_exist, 'volumes_links' in res_dict)
# Matrix of osapi_max_limit / limit combinations: a next link must appear
# exactly when the returned page was truncated by either bound.
def test_volume_default_limit(self):
self.stubs.UnsetAll()
self._create_db_volumes(3)
# Verify both the index and detail queries
for detailed in (True, False):
# Number of volumes less than max, do not include
self._validate_next_link(detailed, item_count=3, osapi_max_limit=4,
limit=None, should_link_exist=False)
# Number of volumes equals the max, next link will be included
self._validate_next_link(detailed, item_count=3, osapi_max_limit=3,
limit=None, should_link_exist=True)
# Number of volumes more than the max, include next link
self._validate_next_link(detailed, item_count=2, osapi_max_limit=2,
limit=None, should_link_exist=True)
# Limit lower than max but doesn't limit, no next link
self._validate_next_link(detailed, item_count=3, osapi_max_limit=5,
limit=4, should_link_exist=False)
# Limit lower than max and limits, we have next link
self._validate_next_link(detailed, item_count=2, osapi_max_limit=4,
limit=2, should_link_exist=True)
# Limit higher than max and max limits, we have next link
self._validate_next_link(detailed, item_count=2, osapi_max_limit=2,
limit=4, should_link_exist=True)
# Limit higher than max but none of them limiting, no next link
self._validate_next_link(detailed, item_count=3, osapi_max_limit=4,
limit=5, should_link_exist=False)
def test_volume_list_default_filters(self):
"""Tests that the default filters from volume.api.API.get_all are set.
1. 'no_migration_targets'=True for non-admins and get_all_by_project is
invoked.
2. 'no_migration_targets' is not included for admins.
3. When 'all_tenants' is not specified, then it is removed and
get_all_by_project is invoked for admins.
4. When 'all_tenants' is specified, then it is removed and get_all
is invoked for admins.
"""
# Non-admin, project function should be called with no_migration_status
def stub_volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
self.assertTrue(filters['no_migration_targets'])
self.assertNotIn('all_tenants', filters)
return [stubs.stub_volume(fake.VOLUME_ID, display_name='vol1')]
def stub_volume_get_all(context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False, offset=0):
return []
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all)
# all_tenants does not matter for non-admin
for params in ['', '?all_tenants=1']:
req = fakes.HTTPRequest.blank('/v2/volumes%s' % params)
resp = self.controller.index(req)
self.assertEqual(1, len(resp['volumes']))
self.assertEqual('vol1', resp['volumes'][0]['name'])
# Admin, all_tenants is not set, project function should be called
# without no_migration_status
def stub_volume_get_all_by_project2(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
self.assertNotIn('no_migration_targets', filters)
return [stubs.stub_volume(fake.VOLUME_ID, display_name='vol2')]
def stub_volume_get_all2(context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False, offset=0):
return []
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project2)
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all2)
req = fakes.HTTPRequest.blank('/v2/volumes', use_admin_context=True)
resp = self.controller.index(req)
self.assertEqual(1, len(resp['volumes']))
self.assertEqual('vol2', resp['volumes'][0]['name'])
# Admin, all_tenants is set, get_all function should be called
# without no_migration_status
def stub_volume_get_all_by_project3(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
return []
def stub_volume_get_all3(context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False, offset=0):
self.assertNotIn('no_migration_targets', filters)
self.assertNotIn('all_tenants', filters)
return [stubs.stub_volume(fake.VOLUME3_ID, display_name='vol3')]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project3)
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all3)
req = fakes.HTTPRequest.blank('/v2/volumes?all_tenants=1',
use_admin_context=True)
resp = self.controller.index(req)
self.assertEqual(1, len(resp['volumes']))
self.assertEqual('vol3', resp['volumes'][0]['name'])
# GET /v2/volumes/<id>: the serialized volume matches the expected view and
# the result is stored in the request's resource cache.
def test_volume_show(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
res_dict = self.controller.show(req, fake.VOLUME_ID)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
metadata={'attached_mode': 'rw', 'readonly': 'False'})
self.assertEqual(expected, res_dict)
# Finally test that we cached the returned volume
self.assertIsNotNone(req.cached_resource_by_id(fake.VOLUME_ID))
# A detached volume is shown without any attachment entries and without the
# 'attached_mode' metadata key.
def test_volume_show_no_attachments(self):
def stub_volume_get(self, context, volume_id, **kwargs):
vol = stubs.stub_volume(volume_id, attach_status='detached')
return fake_volume.fake_volume_obj(context, **vol)
def stub_volume_admin_metadata_get(context, volume_id, **kwargs):
return stubs.stub_volume_admin_metadata_get(
context, volume_id, attach_status='detached')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
self.stubs.Set(db, 'volume_admin_metadata_get',
stub_volume_admin_metadata_get)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
res_dict = self.controller.show(req, fake.VOLUME_ID)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
metadata={'readonly': 'False'})
self.assertEqual(expected, res_dict)
# Unknown volume -> 404, and nothing must be left in the request cache.
def test_volume_show_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1)
# Finally test that nothing was cached
self.assertIsNone(req.cached_resource_by_id(fake.VOLUME_ID))
# End-to-end show through the real DB: admin-only metadata keys other than
# 'readonly' must not leak into the response, and attachment details are
# rendered for an in-use volume.
def test_volume_show_with_admin_metadata(self):
volume = stubs.stub_volume(fake.VOLUME_ID)
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(),
fake.VOLUME_ID,
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': fake.VOLUME_ID, }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], fake.INSTANCE_ID, None, '/')
attach_tmp = db.volume_attachment_get(context.get_admin_context(),
attachment['id'])
volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.show(req, fake.VOLUME_ID)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
volume_type=None, status='in-use',
attachments=[{'id': fake.VOLUME_ID,
'attachment_id': attachment['id'],
'volume_id': stubs.DEFAULT_VOL_ID,
'server_id': fake.INSTANCE_ID,
'host_name': None,
'device': '/',
'attached_at': attach_tmp['attach_time'].replace(
tzinfo=iso8601.iso8601.Utc()),
}],
metadata={'key': 'value', 'readonly': 'True'},
with_migration_status=True)
expected['volume']['updated_at'] = volume_tmp['updated_at'].replace(
tzinfo=iso8601.iso8601.Utc())
self.assertEqual(expected, res_dict)
# A volume with an encryption key id is reported as encrypted=True.
def test_volume_show_with_encrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
vol = stubs.stub_volume(volume_id, encryption_key_id=fake.KEY_ID)
return fake_volume.fake_volume_obj(context, **vol)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
res_dict = self.controller.show(req, fake.VOLUME_ID)
self.assertTrue(res_dict['volume']['encrypted'])
# A volume without an encryption key id is reported as encrypted=False.
def test_volume_show_with_unencrypted_volume(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
self.stubs.Set(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
res_dict = self.controller.show(req, fake.VOLUME_ID)
self.assertEqual(False, res_dict['volume']['encrypted'])
# DELETE /v2/volumes/<id> on an available volume returns 202 Accepted.
def test_volume_delete(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
resp = self.controller.delete(req, fake.VOLUME_ID)
self.assertEqual(202, resp.status_int)
# Deleting an attached volume propagates VolumeAttached with the expected
# message (the controller does not convert it here).
def test_volume_delete_attached(self):
def stub_volume_attached(self, context, volume,
force=False, cascade=False):
raise exception.VolumeAttached(volume_id=volume['id'])
self.stubs.Set(volume_api.API, "delete", stub_volume_attached)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
exp = self.assertRaises(exception.VolumeAttached,
self.controller.delete,
req, 1)
expect_msg = "Volume 1 is still attached, detach volume first."
self.assertEqual(expect_msg, six.text_type(exp))
# Deleting an unknown volume -> 404 Not Found.
def test_volume_delete_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1)
# Admin listing without all_tenants stays scoped to the admin's project.
def test_admin_list_volumes_limited_to_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID,
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
# Admin listing with all_tenants=1 returns volumes from every project.
def test_admin_list_volumes_all_tenants(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank(
'/v2/%s/volumes?all_tenants=1' % fake.PROJECT_ID,
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(3, len(res['volumes']))
# Despite the name: a non-admin passing all_tenants=1 is still restricted
# to their own project (only one volume comes back).
def test_all_tenants_non_admin_gets_all_tenants(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank(
'/v2/%s/volumes?all_tenants=1' % fake.PROJECT_ID)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
# Plain non-admin listing is scoped to the caller's project.
def test_non_admin_get_by_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
# Helper: POST the given body and expect 400 Bad Request.
def _create_volume_bad_request(self, body):
req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID)
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
# Create with no body -> 400.
def test_create_no_body(self):
self._create_volume_bad_request(body=None)
# Create without a 'volume' key -> 400.
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._create_volume_bad_request(body=body)
# Create where 'volume' is not a dict -> 400.
def test_create_malformed_entity(self):
body = {'volume': 'string'}
self._create_volume_bad_request(body=body)
# Helper: drive _get_volumes with a display_name query parameter and assert
# that volume_api.API.get_all receives it verbatim as a filter, together
# with the default created_at/desc sort and CONF.osapi_max_limit.
def _test_get_volumes_by_name(self, get_all, display_name):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'display_name': display_name}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'display_name': display_name},
viewable_admin_meta=True, offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_string(self, get_all):
"""Test to get a volume with an alpha-numeric display name."""
self._test_get_volumes_by_name(get_all, 'Volume-573108026')
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_double_quoted_string(self, get_all):
"""Test to get a volume with a double-quoted display name."""
self._test_get_volumes_by_name(get_all, '"Volume-573108026"')
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_single_quoted_string(self, get_all):
"""Test to get a volume with a single-quoted display name."""
self._test_get_volumes_by_name(get_all, "'Volume-573108026'")
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_quote_in_between_string(self, get_all):
"""Test to get a volume with a quote in between the display name."""
self._test_get_volumes_by_name(get_all, 'Volu"me-573108026')
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_mixed_quoted_string(self, get_all):
"""Test to get a volume with a mix of single and double quotes. """
# The display name starts with a single quote and ends with a
# double quote
self._test_get_volumes_by_name(get_all, '\'Volume-573108026"')
# bootable=1 in the query string is normalized to the boolean True filter.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_true(self, get_all):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'display_name': 'Volume-573108026', 'bootable': 1}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'display_name': 'Volume-573108026', 'bootable': True},
viewable_admin_meta=True, offset=0)
# bootable=0 is normalized to the boolean False filter.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_false(self, get_all):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'display_name': 'Volume-573108026', 'bootable': 0}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'display_name': 'Volume-573108026', 'bootable': False},
viewable_admin_meta=True, offset=0)
# A stringified python list in the 'id' parameter is parsed into a real
# list of ids before being handed to get_all.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_list(self, get_all):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'id': "['%s', '%s', '%s']" % (
fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID)}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'id': [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]},
viewable_admin_meta=True,
offset=0)
# The public 'name' parameter is translated to the DB 'display_name' filter.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_expression(self, get_all):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'name': "d-"}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'display_name': 'd-'}, viewable_admin_meta=True, offset=0)
# 'status' is passed through unchanged.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_status(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'status': 'available'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'status': 'available'}, viewable_admin_meta=True,
offset=0)
# A stringified dict in 'metadata' is parsed into a real dict filter.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_metadata(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'metadata': "{'fake_key': 'fake_value'}"}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'metadata': {'fake_key': 'fake_value'}},
viewable_admin_meta=True, offset=0)
# 'availability_zone' is passed through unchanged.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_availability_zone(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'availability_zone': 'nova'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'availability_zone': 'nova'}, viewable_admin_meta=True,
offset=0)
# 'bootable' alone is normalized to a boolean as well.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_bootable(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'bootable': 1}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'bootable': True}, viewable_admin_meta=True,
offset=0)
# Unrecognized query parameters are dropped; only valid filters remain.
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_invalid_filter(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'invalid_filter': 'invalid',
'availability_zone': 'nova'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'availability_zone': 'nova'}, viewable_admin_meta=True,
offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_sort_by_name(self, get_all):
"""Name in client means display_name in database."""
req = mock.MagicMock()
ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'sort': 'name'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['display_name'], filters={}, offset=0)
# The set of allowed filters is driven by the query_volume_filters option.
def test_get_volume_filter_options_using_config(self):
filter_list = ['name', 'status', 'metadata', 'bootable',
'availability_zone']
self.override_config('query_volume_filters', filter_list)
self.assertEqual(filter_list,
self.controller._get_volume_filter_options())
| apache-2.0 |
alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/_xmlplus/dom/ext/reader/__init__.py | 10 | 2207 | ########################################################################
#
# File Name: __init__.py
#
#
"""
The 4DOM reader module has routines for deserializing XML and HTML to DOM
WWW: http://4suite.org/4DOM e-mail: support@4suite.org
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.org/COPYRIGHT for license and copyright information
"""
import string, urllib2, urlparse, cStringIO, os
from xml.dom.ext import ReleaseNode
# StrStream wraps a (possibly Unicode) string in a cStringIO buffer.
# When the codecs machinery is available, Unicode input is encoded to
# UTF-8 first; otherwise the byte string is wrapped as-is.
try:
import codecs
from types import UnicodeType
encoder = codecs.lookup("utf-8")[0] # encode,decode,reader,writer
def StrStream(st):
if type(st) is UnicodeType:
st = encoder(st)[0]
return cStringIO.StringIO(st)
except ImportError:
StrStream = lambda x: cStringIO.StringIO(x)
class BaseUriResolver:
    """Resolve a URI, optionally relative to a base, to an open stream."""

    def resolve(self, uri, base=''):
        """Return a readable stream for *uri*.

        Relative and well-known-scheme URIs are first joined against
        *base*.  A URI naming an existing local file is opened directly
        (works around urllib's handling of Windows paths); anything else
        is fetched through urllib2.
        """
        # urlparse returns (scheme, netloc, path, params, query, fragment)
        parsed_scheme = urlparse.urlparse(uri)[0]
        if parsed_scheme in ('', 'http', 'ftp', 'file', 'gopher'):
            uri = urlparse.urljoin(base, uri)
        if os.access(uri, os.F_OK):
            # Hack because urllib breaks on Windows paths
            return open(uri)
        return urllib2.urlopen(uri)

BASIC_RESOLVER = BaseUriResolver()
class Reader:
    """Abstract base class for 4DOM deserializers.

    Concrete readers override fromStream(); the convenience methods
    fromString() and fromUri() adapt their input to a stream and
    delegate to it.
    """

    def clone(self):
        """Return a new, equivalent instance of this reader.

        If the subclass records its constructor arguments via
        __getinitargs__ they are replayed; otherwise the class is
        instantiated with no arguments.
        """
        if hasattr(self, '__getinitargs__'):
            # Replaces the historical apply() call, which was removed
            # in Python 3; behavior is identical.
            return self.__class__(*self.__getinitargs__())
        else:
            return self.__class__()

    def fromStream(self, stream, ownerDoc=None):
        """Create a DOM from a stream.  Subclasses must override this."""
        # Was `raise "NOT OVERIDDEN"` - string exceptions have been
        # invalid since Python 2.6 (a TypeError at raise time), so the
        # intended "abstract method" error never surfaced properly.
        raise NotImplementedError('fromStream must be overridden')

    def fromString(self, str, ownerDoc=None):
        """Create a DOM from a string."""
        stream = StrStream(str)
        try:
            return self.fromStream(stream, ownerDoc)
        finally:
            stream.close()

    def fromUri(self, uri, ownerDoc=None):
        """Create a DOM from the resource named by a URI."""
        stream = BASIC_RESOLVER.resolve(uri)
        try:
            return self.fromStream(stream, ownerDoc)
        finally:
            stream.close()

    def releaseNode(self, node):
        "Free a DOM tree"
        node and ReleaseNode(node)
| agpl-3.0 |
titilambert/harbour-squilla | embedded_libs/bs4/__init__.py | 14 | 15389 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = 'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
    """
    This class defines the basic interface called by the tree builders.
    These methods will be called by the parser:
    reset()
    feed(markup)
    The tree builder may call these methods from its feed() implementation:
    handle_starttag(name, attrs) # See note about return value
    handle_endtag(name)
    handle_data(data) # Appends to the current data node
    endData(containerClass=NavigableString) # Ends the current data node
    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.
    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """
    # Name of the synthetic root node that represents the whole document.
    ROOT_TAG_NAME = '[document]'
    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']
    # Whitespace characters that endData() may collapse to ' ' or '\n'.
    ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, **kwargs):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser."""
        # Warn about (and discard) BS3-era constructor arguments that
        # BS4 no longer honors.
        if 'convertEntities' in kwargs:
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")
        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")
        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")
        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")
        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. You can pass in features='html' "
                "or features='xml' to get a builder capable of handling "
                "one or the other.")
        def deprecated_argument(old_name, new_name):
            # Pop a renamed BS3 keyword argument (warning the caller) and
            # return its value, or None if the caller did not pass it.
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name))
                value = kwargs[old_name]
                del kwargs[old_name]
                return value
            return None
        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")
        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")
        # Anything still left in kwargs is an argument we don't understand.
        if len(kwargs) > 0:
            arg = list(kwargs.keys()).pop()
            raise TypeError(
                "__init__() got an unexpected keyword argument '%s'" % arg)
        if builder is None:
            # No explicit builder: look one up in the registry by feature
            # names, defaulting to DEFAULT_BUILDER_FEATURES.
            if isinstance(features, str):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))
            builder = builder_class()
        self.builder = builder
        self.is_xml = builder.is_xml
        self.builder.soup = self
        self.parse_only = parse_only
        if hasattr(markup, 'read'): # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256:
            # Print out warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # just in case that's what the user really wants.
            if (isinstance(markup, str)
                and not os.path.supports_unicode_filenames):
                possible_filename = markup.encode("utf8")
            else:
                possible_filename = markup
            is_file = False
            try:
                is_file = os.path.exists(possible_filename)
            except Exception as e:
                # This is almost certainly a problem involving
                # characters not valid in filenames on this
                # system. Just let it go.
                pass
            if is_file:
                warnings.warn(
                    '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
            if markup[:5] == "http:" or markup[:6] == "https:":
                # TODO: This is ugly but I couldn't get it to work in
                # Python 3 otherwise.
                if ((isinstance(markup, bytes) and not b' ' in markup)
                    or (isinstance(markup, str) and not ' ' in markup)):
                    warnings.warn(
                        '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
        # prepare_markup() may yield several candidate (markup, encoding)
        # combinations; keep trying until one parses without being rejected.
        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
            self.builder.prepare_markup(markup, from_encoding)):
            self.reset()
            try:
                self._feed()
                break
            except ParserRejectedMarkup:
                pass
        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None
    def _feed(self):
        """Run the stored markup through the builder, then close the tree."""
        # Convert the document to Unicode.
        self.builder.reset()
        self.builder.feed(self.markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
    def reset(self):
        """Reinitialize this object as an empty root tag with a fresh stack."""
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.current_data = []
        self.currentTag = None
        self.tagStack = []
        self.preserve_whitespace_tag_stack = []
        self.pushTag(self)
    def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
        """Create a new tag associated with this soup."""
        return Tag(None, self.builder, name, namespace, nsprefix, attrs)
    def new_string(self, s, subclass=NavigableString):
        """Create a new NavigableString associated with this soup."""
        navigable = subclass(s)
        navigable.setup()
        return navigable
    def insert_before(self, successor):
        # The document root has no siblings, so this operation is undefined.
        raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
    def insert_after(self, successor):
        # The document root has no siblings, so this operation is undefined.
        raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
    def popTag(self):
        """Pop the top tag off the stack and return the new current tag."""
        tag = self.tagStack.pop()
        if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
            self.preserve_whitespace_tag_stack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag
    def pushTag(self, tag):
        """Make `tag` the current tag, appending it to the previous one."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
        # Inside tags like <pre>, whitespace must not be collapsed.
        if tag.name in self.builder.preserve_whitespace_tags:
            self.preserve_whitespace_tag_stack.append(tag)
    def endData(self, containerClass=NavigableString):
        """Flush accumulated character data into a new string node."""
        if self.current_data:
            current_data = ''.join(self.current_data)
            # If whitespace is not preserved, and this string contains
            # nothing but ASCII spaces, replace it with a single space
            # or newline.
            if not self.preserve_whitespace_tag_stack:
                strippable = True
                for i in current_data:
                    if i not in self.ASCII_SPACES:
                        strippable = False
                        break
                if strippable:
                    if '\n' in current_data:
                        current_data = '\n'
                    else:
                        current_data = ' '
            # Reset the data collector.
            self.current_data = []
            # Should we add this string to the tree at all?
            if self.parse_only and len(self.tagStack) <= 1 and \
                   (not self.parse_only.text or \
                    not self.parse_only.search(current_data)):
                return
            o = containerClass(current_data)
            self.object_was_parsed(o)
    def object_was_parsed(self, o, parent=None, most_recent_element=None):
        """Add an object to the parse tree."""
        parent = parent or self.currentTag
        most_recent_element = most_recent_element or self._most_recent_element
        o.setup(parent, most_recent_element)
        if most_recent_element is not None:
            most_recent_element.next_element = o
        self._most_recent_element = o
        parent.contents.append(o)
    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instqance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The BeautifulSoup object itself can never be popped.
            return
        most_recently_popped = None
        stack_size = len(self.tagStack)
        # Walk the stack from the top down, popping until a match is found.
        for i in range(stack_size - 1, 0, -1):
            t = self.tagStack[i]
            if (name == t.name and nsprefix == t.prefix):
                if inclusivePop:
                    most_recently_popped = self.popTag()
                break
            most_recently_popped = self.popTag()
        return most_recently_popped
    def handle_starttag(self, name, namespace, nsprefix, attrs):
        """Push a start tag on to the stack.
        If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occured
        in the document. For instance, if this was a self-closing tag,
        don't call handle_endtag.
        """
        # print "Start tag %s: %s" % (name, attrs)
        self.endData()
        if (self.parse_only and len(self.tagStack) <= 1
            and (self.parse_only.text
                 or not self.parse_only.search_tag(name, attrs))):
            return None
        tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
                  self.currentTag, self._most_recent_element)
        if tag is None:
            return tag
        if self._most_recent_element:
            self._most_recent_element.next_element = tag
        self._most_recent_element = tag
        self.pushTag(tag)
        return tag
    def handle_endtag(self, name, nsprefix=None):
        """Close the most recently opened tag matching name/prefix."""
        #print "End tag: " + name
        self.endData()
        self._popToTag(name, nsprefix)
    def handle_data(self, data):
        """Accumulate character data until endData() is called."""
        self.current_data.append(data)
    def decode(self, pretty_print=False,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a string or Unicode representation of this document.
        To get Unicode, pass None for encoding."""
        if self.is_xml:
            # Print the XML declaration
            encoding_part = ''
            if eventual_encoding != None:
                encoding_part = ' encoding="%s"' % eventual_encoding
            prefix = '<?xml version="1.0"%s?>\n' % encoding_part
        else:
            prefix = ''
        if not pretty_print:
            indent_level = None
        else:
            indent_level = 0
        return prefix + super(BeautifulSoup, self).decode(
            indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
# Both _s and _soup are plain aliases for the BeautifulSoup class.
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser."""
    def __init__(self, *args, **kwargs):
        # Force the XML tree builder, warn the caller off this class, and
        # delegate everything else to BeautifulSoup.
        kwargs['features'] = 'xml'
        deprecation_message = (
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        warnings.warn(deprecation_message)
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
    """Exception available for halting a parse; not raised in this module."""
    pass
class FeatureNotFound(ValueError):
    """Raised when no registered tree builder offers the requested features."""
    pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    # Read markup from stdin and emit the pretty-printed tree.
    import sys
    soup = BeautifulSoup(sys.stdin)
    print(soup.prettify())
| gpl-3.0 |
adviti/melange | app/soc/modules/gsoc/models/follower.py | 1 | 1523 | #!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Follower Model."""
from google.appengine.ext import db
import soc.models.linkable
import soc.models.user
class Follower(soc.models.linkable.Linkable):
  """Details specific to a Follower.
  A Follower is a generic model which indicates that a User is following
  some other Linkable entity in the application.
  Scope and scope_path should be set to the entity being followed.
  The link_id should be used to indicate which user is following.
  If more functionality is needed like for instance when following
  either a public or private review for Student Proposals this model
  should be extended. As to make it possible to create different types
  of following.
  """
  #: Required property to tie a user to the entity it is following
  # NOTE(review): this property references soc.models.user, which is not
  # imported in the lines visible here -- verify the import exists.
  user = db.ReferenceProperty(reference_class=soc.models.user.User,
      required=True, collection_name='following')
| apache-2.0 |
rschnapka/bank-statement-reconcile | account_easy_reconcile/__init__.py | 8 | 1130 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2012 Camptocamp SA (Guewen Baconnier)
# Copyright (C) 2010 Sébastien Beau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import easy_reconcile
from . import base_reconciliation
from . import simple_reconciliation
from . import easy_reconcile_history
from . import res_config
| agpl-3.0 |
AlanJAS/iknowUruguay | recursos/4paysandu/datos/4paysandu.py | 1 | 3809 | # -*- coding: utf-8 -*-
from gettext import gettext as _
NAME = _('Paysandú')
STATES = [
(_('Salto'), 254, 417, 156, 0),
(_('Paysandú'), 253, 347, 466, 0),
(_('Río Negro'), 252, 294, 767, 0),
(_('Durazno'), 251, 624, 862, 0),
(_('Tacuarembó'), 250, 737, 577, -90),
(_('Argentina'), 249, 33, 150, 90)
]
CITIES = [
(_('Paysandú'), 75, 590, 1, 45, -5),
(_('Chapicuy'), 140, 289, 2, 20, -14),
(_('Bella Vista'), 203, 298, 2, 0, 14),
(_('Pueblo Gallinal'), 320, 359, 2, 0, -14),
(_('Cerro Chato'), 296, 368, 2, -20, 14),
(_('Quebracho'), 140, 411, 2, 0, -14),
(_('Soto'), 266, 406, 2, 0, 12),
(_('El Eucalipto'), 376, 381, 2, 15, -14),
(_('Tambores'), 742, 385, 2, -45, 0),
(_('Guarapirí'), 393, 392, 2, 0, 12),
(_('Queguayar'), 160, 445, 2, 0, -14),
(_('Lorenzo Geyres'), 150, 466, 2, 0, 12),
(_('Cuchilla del Fuego'), 472, 468, 2, 0, -14),
(_('Piedra Sola'), 720, 465, 2, -50, 0),
(_('Constancia'), 103, 517, 2, 0, -14),
(_('Curapí'), 77, 544, 2, 22, -12),
(_('Esperanza'), 149, 569, 2, 0, -12),
(_('Porvenir'), 149, 601, 2, 0, 12),
(_('Piedras Coloradas'), 258, 582, 2, 0, -14),
(_('Orgoroso'), 291, 595, 2, 42, 0),
(_('Arroyo Negro'), 302, 617, 2, -60, 0),
(_('La Tentación'), 202, 643, 2, 0, 12),
(_('Guichón'), 397, 585, 2, -5, -14),
(_('Beisso'), 460, 569, 2, 5, -14),
(_('Tres Arboles'), 575, 596, 2, -20, -14),
(_('Tiatucurá'), 632, 577, 2, -10, -14)
]
RIVERS = [
(_('Río Uruguay'), 254, 10, 450, 90),
(_('Río Daymán'), 253, 291, 268, -25),
(_('Río Queguay Grande'), 252, 112, 502, 0),
(_('Río Queguay Chico'), 251, 521, 392, 35),
(_('Río Negro'), 250, 676, 818, 0),
(_('Arroyo Guaviyú'), 247, 132, 379, 0),
(_('Arroyo Quebracho Grande'), 245, 131, 428, 25),
(_('Arroyo Araujo'), 244, 195, 443, 45),
(_('Arroyo de Soto'), 243, 247, 452, 45),
(_('Arroyo Buricayupí'), 242, 293, 449, 60),
(_('Arroyo Carumbé'), 241, 265, 308, -55),
(_('Arroyo San Francisco Grande'), 238, 144, 580, -5),
(_('Arroyo Negro'), 236, 118, 692, -30),
(_('Arroyo Rabón'), 235, 139, 615, 30),
(_('Arroyo Valdés'), 234, 190, 637, 10),
(_('Arroyo Celestina'), 233, 183, 655, 0),
(_('Arroyo Bacacuá Grande'), 231, 250, 550, -45),
(_('Arroyo Ñacurutú Grande'), 228, 337, 553, -55),
(_('Arroyo de la Capilla'), 227, 273, 540, -45),
(_('Arroyo Guayabos'), 226, 359, 556, -60),
(_('Arroyo Santa Ana'), 225, 418, 544, -45),
(_('Arroyo de los Corrales'), 222, 578, 428, 60),
(_('Arroyo de los Molles'), 221, 501, 490, 60),
(_('Arroyo Zapatero'), 218, 674, 490, 20),
(_('Arroyo Juan Tomás'), 211, 587, 598, -30),
(_('Arroyo Salsipuedes Grande'), 210, 656, 632, 74),
(_('Arroyo Molles Grande'), 206, 433, 418, 65),
(_('Arroyo Molles Chico'), 205, 458, 418, 65),
(_('Arroyo Gualeguay'), 204, 344, 445, 55),
(_('Arroyo Guarapirú'), 203, 378, 423, 75)
]
CUCHILLAS = [
(_('Cuchilla del Queguay'), 254, 377, 373, 0),
(_('Cuchilla de Haedo'), 253, 521, 593, 0)
]
HILLS = [
(_('Cerro de los Difuntos'), 169, 482, 5, 0, 14),
(_('Cerro Buricayupí'), 264, 454, 5, 0, 14),
(_('Cerro de la Glorieta'), 450, 387, 5, 0, 14),
(_('Cerro del Vigía'), 581, 395, 5, 0, 14),
(_('Cerro Campana'), 644, 434, 5, 0, 14),
(_('Cerro Arbolito'), 736, 386, 5, -60, -2),
(_('Cerro del Toro'), 499, 581, 5, 0, -14),
(_('Cerro Palmera Sola'), 437, 589, 5, 0, 16),
(_('Meseta de Artigas'), 117, 273, 5, 68, 14)
]
ROUTES = [
(_('Route %s') % 3, 253, 99, 635, -45),
(_('Route %s') % 26, 254, 298, 451, 0),
(_('Route %s') % 4, 252, 380, 495, -50),
(_('Route %s') % 90, 251, 250, 592, -5),
(_('Route %s') % 24, 250, 94, 685, 90)
]
| gpl-3.0 |
nhicher/ansible | lib/ansible/modules/remote_management/oneview/oneview_ethernet_network.py | 147 | 8911 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network
short_description: Manage OneView Ethernet Network resources
description:
- Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
version_added: "2.4"
requirements:
- hpOneView >= 3.1.0
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
state:
description:
- Indicates the desired state for the Ethernet Network resource.
- C(present) will ensure data properties are compliant with OneView.
- C(absent) will remove the resource from OneView, if it exists.
- C(default_bandwidth_reset) will reset the network connection template to the default.
default: present
choices: [present, absent, default_bandwidth_reset]
data:
description:
- List with Ethernet Network properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Ensure that the Ethernet Network is present using the default configuration
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
vlanId: '201'
delegate_to: localhost
- name: Update the Ethernet Network changing bandwidth and purpose
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
purpose: Management
bandwidth:
maximumBandwidth: 3000
typicalBandwidth: 2000
delegate_to: localhost
- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
newName: 'Renamed Ethernet Network'
delegate_to: localhost
- name: Ensure that the Ethernet Network is absent
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: absent
data:
name: 'New Ethernet Network'
delegate_to: localhost
- name: Create Ethernet networks in bulk
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
vlanIdRange: '1-10,15,17'
purpose: General
namePrefix: TestNetwork
smartLink: false
privateNetwork: false
bandwidth:
maximumBandwidth: 10000
typicalBandwidth: 2000
delegate_to: localhost
- name: Reset to the default network connection template
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: default_bandwidth_reset
data:
name: 'Test Ethernet Network'
delegate_to: localhost
'''
RETURN = '''
ethernet_network:
description: Has the facts about the Ethernet Networks.
returned: On state 'present'. Can be null.
type: dict
ethernet_network_bulk:
description: Has the facts about the Ethernet Networks affected by the bulk insert.
returned: When 'vlanIdRange' attribute is in data argument. Can be null.
type: dict
ethernet_network_connection_template:
description: Has the facts about the Ethernet Network Connection Template.
returned: On state 'default_bandwidth_reset'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
class EthernetNetworkModule(OneViewModuleBase):
    """State logic for the oneview_ethernet_network Ansible module."""
    # User-facing result messages returned in the module's 'msg' field.
    MSG_CREATED = 'Ethernet Network created successfully.'
    MSG_UPDATED = 'Ethernet Network updated successfully.'
    MSG_DELETED = 'Ethernet Network deleted successfully.'
    MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
    MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
    MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
    MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
    MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
    MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
    MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
    # Key under which the resource's facts are returned to Ansible.
    RESOURCE_FACT_NAME = 'ethernet_network'
    def __init__(self):
        """Declare the module's argument spec and bind the resource client."""
        argument_spec = dict(
            state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
            data=dict(type='dict', required=True),
        )
        super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
        self.resource_client = self.oneview_client.ethernet_networks
    def execute_module(self):
        """Dispatch on the requested state and return the Ansible result dict."""
        changed, msg, ansible_facts, resource = False, '', {}, None
        if self.data.get('name'):
            resource = self.get_by_name(self.data['name'])
        if self.state == 'present':
            # A 'vlanIdRange' in data switches the module to bulk creation.
            if self.data.get('vlanIdRange'):
                return self._bulk_present()
            else:
                return self._present(resource)
        elif self.state == 'absent':
            return self.resource_absent(resource)
        elif self.state == 'default_bandwidth_reset':
            changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
        return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)
    def _present(self, resource):
        """Ensure one network exists; apply bandwidth/scopes when provided."""
        # 'bandwidth' and 'scopeUris' are managed through separate API calls,
        # so remove them from the payload before the generic present step.
        bandwidth = self.data.pop('bandwidth', None)
        scope_uris = self.data.pop('scopeUris', None)
        result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
        if bandwidth:
            if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
                result['changed'] = True
                result['msg'] = self.MSG_UPDATED
        if scope_uris is not None:
            result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
        return result
    def _bulk_present(self):
        """Ensure all networks in a VLAN-id range exist, creating missing ones."""
        vlan_id_range = self.data['vlanIdRange']
        result = dict(ansible_facts={})
        ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
        if not ethernet_networks:
            self.resource_client.create_bulk(self.data)
            result['changed'] = True
            result['msg'] = self.MSG_BULK_CREATED
        else:
            # Work out which VLAN ids in the requested range are missing.
            vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
            for net in ethernet_networks[:]:
                vlan_ids.remove(net['vlanId'])
            if len(vlan_ids) == 0:
                result['msg'] = self.MSG_BULK_ALREADY_EXIST
                result['changed'] = False
            else:
                # Rewrite vlanIdRange to cover only the missing ids, then
                # bulk-create just those.
                if len(vlan_ids) == 1:
                    self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
                else:
                    self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
                self.resource_client.create_bulk(self.data)
                result['changed'] = True
                result['msg'] = self.MSG_MISSING_BULK_CREATED
        result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
        return result
    def _update_connection_template(self, ethernet_network, bandwidth):
        """Merge `bandwidth` into the network's connection template.
        Returns a (changed, template_or_None) tuple."""
        if 'connectionTemplateUri' not in ethernet_network:
            return False, None
        connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
        merged_data = connection_template.copy()
        merged_data.update({'bandwidth': bandwidth})
        if not self.compare(connection_template, merged_data):
            connection_template = self.oneview_client.connection_templates.update(merged_data)
            return True, connection_template
        else:
            return False, None
    def _default_bandwidth_reset(self, resource):
        """Reset the network's connection template to the appliance default."""
        if not resource:
            raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
        default_connection_template = self.oneview_client.connection_templates.get_default()
        changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
        return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
            ethernet_network_connection_template=connection_template)
def main():
    """Ansible entry point: instantiate the module wrapper and run it."""
    EthernetNetworkModule().run()
if __name__ == '__main__':
    main()
| gpl-3.0 |
indictranstech/trufil-erpnext | erpnext/shopping_cart/test_shopping_cart.py | 20 | 6171 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from erpnext.shopping_cart.cart import _get_cart_quotation, update_cart, get_customer
class TestShoppingCart(unittest.TestCase):
	"""
	Note:
	Shopping Cart == Quotation
	"""
	def setUp(self):
		# Run as Administrator so fixtures can be configured freely.
		frappe.set_user("Administrator")
		self.enable_shopping_cart()
	def tearDown(self):
		frappe.set_user("Administrator")
		self.disable_shopping_cart()
	def test_get_cart_new_user(self):
		"""A fresh user's cart is tied to a newly created customer contact."""
		self.login_as_new_user()
		# test if lead is created and quotation with new lead is fetched
		quotation = _get_cart_quotation()
		self.assertEquals(quotation.quotation_to, "Customer")
		self.assertEquals(frappe.db.get_value("Contact", {"customer": quotation.customer}, "email_id"),
			"test_cart_user@example.com")
		self.assertEquals(quotation.lead, None)
		self.assertEquals(quotation.contact_email, frappe.session.user)
		return quotation
	def test_get_cart_customer(self):
		"""An existing customer's cart resolves to that customer."""
		self.login_as_customer()
		# test if quotation with customer is fetched
		quotation = _get_cart_quotation()
		self.assertEquals(quotation.quotation_to, "Customer")
		self.assertEquals(quotation.customer, "_Test Customer")
		self.assertEquals(quotation.lead, None)
		self.assertEquals(quotation.contact_email, frappe.session.user)
		return quotation
	def test_add_to_cart(self):
		"""Adding two items creates two quotation rows with list-price amounts."""
		self.login_as_customer()
		# remove from cart
		self.remove_all_items_from_cart()
		# add first item
		update_cart("_Test Item", 1)
		quotation = self.test_get_cart_customer()
		self.assertEquals(quotation.get("items")[0].item_code, "_Test Item")
		self.assertEquals(quotation.get("items")[0].qty, 1)
		self.assertEquals(quotation.get("items")[0].amount, 10)
		# add second item
		update_cart("_Test Item 2", 1)
		quotation = self.test_get_cart_customer()
		self.assertEquals(quotation.get("items")[1].item_code, "_Test Item 2")
		self.assertEquals(quotation.get("items")[1].qty, 1)
		self.assertEquals(quotation.get("items")[1].amount, 20)
		self.assertEquals(len(quotation.get("items")), 2)
	def test_update_cart(self):
		"""Updating the qty of a cart row recalculates amount and totals."""
		# first, add to cart
		self.test_add_to_cart()
		# update first item
		update_cart("_Test Item", 5)
		quotation = self.test_get_cart_customer()
		self.assertEquals(quotation.get("items")[0].item_code, "_Test Item")
		self.assertEquals(quotation.get("items")[0].qty, 5)
		self.assertEquals(quotation.get("items")[0].amount, 50)
		self.assertEquals(quotation.net_total, 70)
		self.assertEquals(len(quotation.get("items")), 2)
	def test_remove_from_cart(self):
		"""Setting qty to 0 removes the row from the cart."""
		# first, add to cart
		self.test_add_to_cart()
		# remove first item
		update_cart("_Test Item", 0)
		quotation = self.test_get_cart_customer()
		self.assertEquals(quotation.get("items")[0].item_code, "_Test Item 2")
		self.assertEquals(quotation.get("items")[0].qty, 1)
		self.assertEquals(quotation.get("items")[0].amount, 20)
		self.assertEquals(quotation.net_total, 20)
		self.assertEquals(len(quotation.get("items")), 1)
		# remove second item
		update_cart("_Test Item 2", 0)
		quotation = self.test_get_cart_customer()
		self.assertEquals(len(quotation.get("items")), 0)
		self.assertEquals(quotation.net_total, 0)
	def test_tax_rule(self):
		"""The tax rule master picked by set_taxes matches the quotation's."""
		self.login_as_customer()
		quotation = self.create_quotation()
		from erpnext.accounts.party import set_taxes
		tax_rule_master = set_taxes(quotation.customer, "Customer", \
			quotation.transaction_date, quotation.company, None, None, \
			quotation.customer_address, quotation.shipping_address_name, 1)
		self.assertEquals(quotation.taxes_and_charges, tax_rule_master)
		self.assertEquals(quotation.total_taxes_and_charges, 1000.0)
		self.remove_test_quotation(quotation)
	def create_quotation(self):
		"""Insert and return a Shopping Cart quotation for the test customer."""
		quotation = frappe.new_doc("Quotation")
		values = {
			"doctype": "Quotation",
			"quotation_to": "Customer",
			"order_type": "Shopping Cart",
			"customer": get_customer(frappe.session.user).name,
			"docstatus": 0,
			"contact_email": frappe.session.user,
			"selling_price_list": "_Test Price List Rest of the World",
			"currency": "USD",
			"taxes_and_charges" : "_Test Tax 1",
			"items": [{
				"item_code": "_Test Item",
				"qty": 1
			}],
			"taxes": frappe.get_doc("Sales Taxes and Charges Template", "_Test Tax 1").taxes,
			"company": "_Test Company"
		}
		quotation.update(values)
		quotation.insert(ignore_permissions=True)
		return quotation
	def remove_test_quotation(self, quotation):
		# Deleting requires admin rights, so switch user first.
		frappe.set_user("Administrator")
		quotation.delete()
	# helper functions
	def enable_shopping_cart(self):
		"""Turn on Shopping Cart settings and seed item prices if missing."""
		settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
		settings.update({
			"enabled": 1,
			"company": "_Test Company",
			"default_customer_group": "_Test Customer Group",
			"quotation_series": "_T-Quotation-",
			"price_list": "_Test Price List India"
		})
		# insert item price
		if not frappe.db.get_value("Item Price", {"price_list": "_Test Price List India",
			"item_code": "_Test Item"}):
			frappe.get_doc({
				"doctype": "Item Price",
				"price_list": "_Test Price List India",
				"item_code": "_Test Item",
				"price_list_rate": 10
			}).insert()
			frappe.get_doc({
				"doctype": "Item Price",
				"price_list": "_Test Price List India",
				"item_code": "_Test Item 2",
				"price_list_rate": 20
			}).insert()
		settings.save()
		frappe.local.shopping_cart_settings = None
	def disable_shopping_cart(self):
		settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
		settings.enabled = 0
		settings.save()
		frappe.local.shopping_cart_settings = None
	def login_as_new_user(self):
		frappe.set_user("test_cart_user@example.com")
	def login_as_customer(self):
		frappe.set_user("test_contact_customer@example.com")
	def remove_all_items_from_cart(self):
		quotation = _get_cart_quotation()
		quotation.set("items", [])
		quotation.save(ignore_permissions=True)
# Fixture doctypes that the test framework must create before these tests run.
test_dependencies = ["Sales Taxes and Charges Template", "Price List", "Item Price", "Shipping Rule", "Currency Exchange",
	"Customer Group", "Lead", "Customer", "Contact", "Address", "Item", "Tax Rule"]
| agpl-3.0 |
tacodog1/tdameritrade | tdapi.py | 1 | 27387 | from __future__ import unicode_literals
import datetime
import base64
import http.client
import urllib.request, urllib.parse, urllib.error
import getpass
import binascii
import time
import array
import string
import math, types
from struct import pack, unpack
from xml.etree import ElementTree
import logging
import pandas
class StockQuote():
    """Snapshot quote for a single symbol, parsed from a TD Ameritrade
    quote XML element.

    All fields default to None at class level and are populated in
    __init__ from the child tags of the supplied element.
    """
    symbol = None
    description = None
    bid = None
    ask = None
    bidAskSize = None
    last = None
    lastTradeSize = None
    lastTradeDate = None
    openPrice = None
    highPrice = None
    lowPrice = None
    closePrice = None
    volume = None
    yearHigh = None
    yearLow = None
    realTime = None
    exchange = None
    assetType = None
    change = None
    changePercent = None
    # Parse table: (attribute name, XML tag name, True if numeric).
    # Replaces twenty hand-written findall(...)[0].text lookups.
    _FIELD_MAP = (
        ('symbol', 'symbol', False),
        ('description', 'description', False),
        ('bid', 'bid', True),
        ('ask', 'ask', True),
        ('bidAskSize', 'bid-ask-size', False),
        ('last', 'last', True),
        ('lastTradeSize', 'last-trade-size', False),
        ('lastTradeDate', 'last-trade-date', False),
        ('openPrice', 'open', True),
        ('highPrice', 'high', True),
        ('lowPrice', 'low', True),
        ('closePrice', 'close', True),
        ('volume', 'volume', True),
        ('yearHigh', 'year-high', True),
        ('yearLow', 'year-low', True),
        ('realTime', 'real-time', False),
        ('exchange', 'exchange', False),
        ('assetType', 'asset-type', False),
        ('change', 'change', True),
        ('changePercent', 'change-percent', False),
    )

    def __init__(self, elementTree):
        """Populate every quote field from the children of *elementTree*.

        As in the original per-field code, a missing tag raises IndexError
        and a non-numeric value in a numeric field raises ValueError.
        """
        for attr, tag, numeric in self._FIELD_MAP:
            value = elementTree.findall(tag)[0].text
            setattr(self, attr, float(value) if numeric else value)

    def __str__(self):
        """Return one 'name: value' line per public attribute."""
        s = ''
        for key in dir(self):
            # Skip dunder/private names (including _FIELD_MAP and methods).
            if key[0] != '_':
                s += '%s: %s\n' % (key, getattr(self, key))
        return s
class OptionChainElement():
    """One row of a TD Ameritrade BinaryOptionChain response.

    Attributes mirror the big-endian packed wire format decoded by
    TDAmeritradeAPI.getBinaryOptionChain, which populates them
    field-by-field.  All attributes default to None until the parser
    fills them in.
    """

    quoteDateTime = None         # chain-level quote timestamp (set via setQuoteDateTime)
    optionDate = None            # expiration date string
    expirationType = None        # 'R' = Regular, 'L' = LEAP
    strike = None                # strike price (double)
    standardOptionFlag = None    # 1 = standard option, 0 = non-standard
    pcIndicator = None           # 'P' (put) or 'C' (call)
    optionSymbol = None
    optionDescription = None     # bytes, decoded on demand in __str__
    bid = None
    ask = None
    baSize = None                # bid/ask size string
    last = None                  # last trade price
    lastTradeSize = None
    lastTradeDate = None
    volume = None
    openInterest = None
    rtQuoteFlag = None           # 1 = real-time quote
    underlyingSymbol = None
    # Greeks
    delta = None
    gamma = None
    theta = None
    vega = None
    rho = None
    impliedVolatility = None
    timeValueIndex = None
    tvIndex = None               # NOTE: the binary parser writes tvIndex, not timeValueIndex
    multiplier = None
    change = None
    changePercentage = None
    itmFlag = None               # 1 = in the money
    ntmFlag = None               # 1 = near the money
    theoreticalValue = None
    deliverableNote = None
    cilDollarAmoubt = None       # [sic] historical typo kept for backward compatibility
    cilDollarAmount = None       # correctly spelled attribute actually set by the parser
    opCashDollarAmount = None
    indexOptionFlag = None       # 1 = index option
    # List of (symbol, shares) deliverable tuples.  Kept as a class-level
    # default for compatibility, but shadowed per instance in __init__.
    deliverables = []

    def __init__(self):
        # Bug fix: the class-level 'deliverables' list was shared by ALL
        # instances, so every element of a parsed chain accumulated every
        # deliverable.  Give each instance its own list.
        self.deliverables = []

    def setQuoteDateTime(self, quoteTime):
        """Record the chain-level quote timestamp on this element."""
        self.quoteDateTime = quoteTime

    def __str__(self):
        # Human-readable one-liner: description, last price, and the five
        # greeks when all of them are present.
        s = self.optionDescription.decode()
        if self.last != None:
            s = '%s Last: $%.2f' % (s, self.last)
        else:
            s = '%s Last: N/A' % s
        if not (self.delta is None or self.gamma is None or self.theta is None or \
                self.vega is None or self.rho is None):
            s = "%s (d: %f g: %f t: %f v: %f r: %f)" % \
                (s, self.delta, self.gamma, self.theta, self.vega, self.rho)
        return s

    # Python 3: render the same inside containers as standalone.
    __repr__ = __str__
class HistoricalPriceBar():
    """A single OHLCV bar from the PriceHistory endpoint.

    Fields are assigned by the caller after construction.
    """

    close = None
    high = None
    low = None
    open = None
    volume = None
    timestamp = None

    def __init__(self):
        pass

    def __str__(self):
        # CSV-style rendering: close,high,low,open,volume,timestamp
        fields = (self.close, self.high, self.low,
                  self.open, self.volume, self.timestamp)
        return '%f,%f,%f,%f,%f,%s' % fields
class TDAmeritradeAPI():
    """Thin client for the legacy TD Ameritrade XML/binary HTTP API."""

    _sourceID = None # Note - Source ID must be provided by TD Ameritrade
    _version = '0.1'
    _active = False   # True once a login has (apparently) succeeded
    _sessionID = ''   # session id returned by the LogIn call

    def __init__(self, sourceID):
        # sourceID: vendor identifier issued by TD Ameritrade.
        self._sourceID = sourceID
def isActive(self, confirm=False):
if self._active == True:
# Confirm with server by calling KeepAlive
if confirm:
if self.keepAlive() == False:
self.login()
# TODO: add more robust checking here to make sure we're really logged in
return True
else:
return False
def keepAlive(self):
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
conn.request('POST', '/apps/100/KeepAlive?source=%s' % self._sourceID)
response = conn.getresponse()
#print response.status, response.reason
#print 'Getting response data...'
data = response.read()
#print 'Data:',data
conn.close()
if data.strip() == 'LoggedOn':
return True
elif data.strip() == 'InvalidSession':
return False
else:
#print 'Unrecognized response: %s' % data
pass
def login(self, login, password):
logging.debug('[tdapi] Entered login()')
params = urllib.parse.urlencode({'source': self._sourceID, 'version': self._version})
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.parse.urlencode({'userid': login,
'password': password,
'source': self._sourceID,
'version': self._version})
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
#conn.set_debuglevel(100)
conn.request('POST', '/apps/100/LogIn?'+params, body, headers)
response = conn.getresponse()
if response.status == 200:
self._active = True
else:
self._active = False
return False
# The data response is an XML fragment. Log it.
data = response.read()
logging.debug('Login response:\n--------%s\n--------' % data)
conn.close()
# Make sure the login succeeded. First look for <result>OK</result>
element = ElementTree.XML(data)
try:
result = element.findall('result')[0].text
if result == 'FAIL':
self._active = False
return False
elif result == 'OK':
self._active = True
else:
logging.error('Unrecognized login result: %s' % result)
return False
# Now get the session ID
self._sessionID = element.findall('xml-log-in')[0].findall('session-id')[0].text
except:
logging.error('Failed to parse login response.')
return False
def logout(self):
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
conn.request('POST', '/apps/100/LogOut?source=%s' % self._sourceID)
response = conn.getresponse()
data = response.read()
conn.close()
self._active = False
    def getSessionID(self):
        """Return the session id captured by login() ('' when not logged in)."""
        return self._sessionID
def getStreamerInfo(self, accountID=None):
arguments = {'source': self._sourceID}
if accountID != None:
arguments['accountid'] = accountID
params = urllib.parse.urlencode(arguments)
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
#conn.set_debuglevel(100)
conn.request('GET', '/apps/100/StreamerInfo?'+params)
response = conn.getresponse()
#print response.status, response.reason
data = response.read()
conn.close()
#print 'Read %d bytes' % len(data)
# We will need to create an ElementTree to process this XML response
# TODO: handle exceptions
element = ElementTree.XML(data)
# Process XML response
streamerInfo = {}
try:
children = element.findall('streamer-info')[0].getchildren()
for c in children:
streamerInfo[c.tag] = c.text
except e:
#print 'Error: failed to parse streamer-info response: %s', e
return False
#print 'Received streamer-info properties: %s' % streamerInfo
return streamerInfo
def getSnapshotQuote(self, tickers, assetType, detailed=False):
logging.info('[tdapi.getSnapshotQuote] Enter')
if len(tickers) > 300:
logging.error('TODO: divide in batches of 300')
if assetType not in ['stock','option','index','mutualfund']:
logging.error('getSnapshotQuote: Unrecognized asset type %s' % assetType)
return []
arguments = {'source': self._sourceID,
'symbol': str.join(',', tickers)}
params = urllib.parse.urlencode(arguments)
#print 'Arguments: ', arguments
conn = http.client.HTTPSConnection('apis.tdameritrade.com')
#conn.set_debuglevel(100)
conn.request('GET', ('/apps/100/Quote;jsessionid=%s?' % self.getSessionID()) +params)
response = conn.getresponse()
data = response.read()
conn.close()
logging.info('[tdapi.getSnapshotQuote] Read %d bytes' % len(data))
quotes = {}
# Perform basic processing regardless of quote type
element = ElementTree.XML(data)
try:
result = element.findall('result')[0].text
if result == 'FAIL':
self._active = False
return False
elif result == 'OK':
self._active = True
else:
logging.error('[tdapi.getSnapshotQuote] Unrecognized result: %s' % result)
return {}
# Now get the session ID
#self._sessionID = element.findall('xml-log-in')[0].findall('session-id')[0].text
except:
logging.error('[tdapi.getSnapshotQuote] Failed to parse snapshot quote response.')
return False
if assetType == 'stock':
try:
quoteElements = element.findall('quote-list')[0].findall('quote')
for i in quoteElements:
symbol = i.findall('symbol')[0].text
if detailed:
q = StockQuote(i) # Create a quote object from etree
quotes[symbol] = q
else:
last = float(i.findall('last')[0].text)
quotes[symbol] = last
except:
logging.error('Failed to parse snapshot quote response')
return {}
else:
logging.error('[tdapi.getSnapshotQuote] Asset type not supported: %s' % assetType)
return quotes
    def getBinaryOptionChain(self, ticker):
        """Fetch and decode the full option chain for ``ticker``.

        The BinaryOptionChain endpoint returns a big-endian packed
        binary stream; this method walks it with a byte cursor, first
        reading the underlying-symbol header, then one repeating block
        per chain row.  Returns a list of OptionChainElement objects.
        Raises ValueError when the server reports an error.
        """
        arguments = {'source': self._sourceID,
                     'symbol': ticker,
                     'range': 'ALL',
                     'quotes': 'true'
                     }
        params = urllib.parse.urlencode(arguments)
        #print 'Arguments: ', arguments
        conn = http.client.HTTPSConnection('apis.tdameritrade.com')
        #conn.set_debuglevel(100)
        conn.request('GET', ('/apps/200/BinaryOptionChain;jsessionid=%s?' % self.getSessionID()) +params)
        response = conn.getresponse()
        data = response.read()
        conn.close()
        # Error code - Byte - 1
        cursor = 0
        error = unpack('b', data[cursor:cursor+1])[0]
        cursor += 1
        # If there is an error, there will be an error length and corresponding error text
        if error != 0:
            errorLength = unpack('>h', data[cursor:cursor+2])[0]
            cursor += 2
            if errorLength > 0:
                errorText = data[cursor:cursor+errorLength]
                cursor += errorLength
                raise ValueError('[getBinaryOptionChain] Error: %s' % errorText)
        # Header: underlying symbol, description, top-of-book quote.
        symbolLength = unpack('>h', data[cursor:cursor+2])[0]
        cursor += 2
        symbol = data[cursor:cursor+symbolLength].decode('utf-8')
        cursor += symbolLength
        symbolDescriptionLength = unpack('>h', data[cursor:cursor+2])[0]
        cursor += 2
        symbolDescription = data[cursor:cursor+symbolDescriptionLength].decode('utf-8')
        cursor += symbolDescriptionLength
        bid = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        ask = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        baSizeLength = unpack('>h', data[cursor:cursor+2])[0]
        cursor += 2
        baSize = data[cursor:cursor+baSizeLength]
        cursor += baSizeLength
        last = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        open = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        high = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        low = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        close = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        volume = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        change = unpack('>d', data[cursor:cursor+8])[0]
        cursor += 8
        rtFlag = chr(unpack('>H', data[cursor:cursor+2])[0])
        cursor += 2
        qtLength = unpack('>H', data[cursor:cursor+2])[0]
        cursor += 2
        quoteTime = data[cursor:cursor+qtLength]
        cursor += qtLength
        rowCount = unpack('>i', data[cursor:cursor+4])[0]
        cursor += 4
        optionChain = []
        # One repeating block per option-chain row.
        for i in range(rowCount):
            # NOTE(review): this guard looks like it should be ">=" (or
            # check the remaining bytes for a whole row) -- confirm.
            if cursor > len(data):
                print('Error! Read too much data')
                break
            o = OptionChainElement()
            optionChain.append(o)
            # Option Date Length - Short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Option Date - String - Variable
            o.optionDate = data[cursor:cursor+l]; cursor += l
            # Expiration Type Length - Short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Expiration Type - String - Variable (R for Regular, L for LEAP)
            o.expirationType = data[cursor:cursor+l]; cursor += l
            # Strike Price - Double - 8
            o.strike = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
            # Standard Option Flag - Byte - 1 (1 = true, 0 = false)
            o.standardOptionFlag = unpack('b', data[cursor:cursor+1])[0]; cursor += 1
            # Put/Call Indicator - Char - 2 (P or C in unicode)
            o.pcIndicator = chr(unpack('>H', data[cursor:cursor+2])[0]); cursor += 2
            # Option Symbol Length - Short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Option Symbol - String - Variable
            o.optionSymbol = data[cursor:cursor+l]; cursor += l
            # Option Description Length - Short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Option Description - String - Variable
            o.optionDescription = data[cursor:cursor+l]; cursor += l
            # Bid - Double - 8
            o.bid = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
            # Ask - Double - 8
            o.ask = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
            # Bid/Ask Size Length - Short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Bid/Ask Size - String - Variable
            o.baSize = data[cursor:cursor+l]; cursor += l
            # Last - Double - 8
            o.last = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
            # Last Trade Size Length - Short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Last Trade Size - String - Variable
            o.lastTradeSize = data[cursor:cursor+l]; cursor += l
            # Last Trade Date Length - short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Last Trade Date - String - Variable
            o.lastTradeDate = data[cursor:cursor+l]; cursor += l
            # Volume - Long - 8
            o.volume = unpack('>Q',data[cursor:cursor+8])[0]; cursor += 8
            # Open Interest - Integer - 4
            o.openInterest = unpack('>i', data[cursor:cursor+4])[0]; cursor += 4
            # RT Quote Flag - Byte - 1 (1=true, 0=false)
            o.rtQuoteFlag = unpack('b', data[cursor:cursor+1])[0]; cursor += 1
            # Stamp the row with the chain-level quote time.
            o.setQuoteDateTime(quoteTime)
            # Underlying Symbol length - Short 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Underlying Symbol - String - Variable
            o.underlyingSymbol = data[cursor:cursor+l].decode('utf-8'); cursor += l
            # Delta - Double- 8
            # Gamma - Double - 8
            # Theta - Double - 8
            # Vega - Double - 8
            # Rho - Double - 8
            # Implied Volatility - Double - 8
            # Time Value Index - Double - 8
            # Multiplier - Double - 8
            # Change - Double - 8
            # Change Percentage - Double - 8
            (o.delta, o.gamma, o.theta, o.vega, o.rho, o.impliedVolatility, o.tvIndex,
                o.multiplier, o.change, o.changePercentage) = \
                unpack('>10d', data[cursor:cursor+80]); cursor += 80
            # ITM Flag - Byte - 1 (1 = true, 0 = false)
            # NTM Flag - Byte - 1 (1 = true, 0 = false)
            (o.itmFlag, o.ntmFlag) = unpack('2b', data[cursor:cursor+2]); cursor += 2
            # Theoretical value - Double - 8
            o.theoreticalValue = unpack('>d', data[cursor:cursor+8])[0]; cursor += 8
            # Deliverable Note Length - Short - 2
            l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
            # Deliverable Note - String - Variable
            o.deliverableNote = data[cursor:cursor+l]; cursor += l
            # CIL Dollar Amount - Double - 8
            # OP Cash Dollar Amount - Double - 8
            (o.cilDollarAmount, o.opCashDollarAmount) = \
                unpack('>2d', data[cursor:cursor+16]); cursor += 16
            # Index Option Flag - Byte - 1 (1 = true, 0 = false)
            o.indexOptionFlag = unpack('b', data[cursor:cursor+1])[0]; cursor += 1
            # Number of Deliverables - Integer - 4
            numDeliverables = unpack('>i', data[cursor:cursor+4])[0]; cursor += 4
            for j in range(numDeliverables):
                # REPEATING block for each Deliverable
                # Deliverable Symbol Length - Short - 2
                l = unpack('>h', data[cursor:cursor+2])[0]; cursor += 2
                # Deliverable Symbol - String - Variable
                s = data[cursor:cursor+l]; cursor += l
                # Deliverable Shares - Integer - 4
                o.deliverables.append((s, unpack('>i', data[cursor:cursor+4])[0])); cursor += 4
                # END
            # Change all "nan" to None to make sure the oce is serializable
            for k in list(o.__dict__.keys()):
                if (type(o.__dict__[k]) == float) and math.isnan(o.__dict__[k]):
                    logging.info('[tdapi.getBinaryOptionChain] Converting o[%s]=nan to None' % (k))
                    o.__dict__[k] = None
        return optionChain
    def getPriceHistory(self, ticker, intervalType='DAILY', intervalDuration='1', periodType='MONTH',
                        period='1', startdate=None, enddate=None, extended=None):
        """Fetch historical OHLCV bars for ``ticker``.

        Decodes the binary PriceHistory response (big-endian packed) and
        returns a pandas.DataFrame with columns
        open, high, low, close, volume, timestamp.
        Raises ValueError on HTTP errors, protocol errors, or a missing
        stream terminator.
        """
        # NOTE(review): these two tables are currently unused --
        # presumably intended for argument validation.  Confirm before
        # wiring them in.
        validPeriodTypes = [
            'DAY',
            'MONTH',
            'YEAR',
            'YTD'
        ]
        validIntervalTypes = {
            'DAY': ['MINUTE'],
            'MONTH': ['DAILY', 'WEEKLY'],
            'YEAR': ['DAILY', 'WEEKLY', 'MONTHLY'],
            'YTD': ['DAILY', 'WEEKLY']
        }
        arguments = {'source': self._sourceID,
                     'requestidentifiertype': 'SYMBOL',
                     'requestvalue': ticker,
                     'intervaltype': intervalType,
                     'intervalduration': intervalDuration,
                     'period': period,
                     'periodtype':periodType,
                     'startdate':startdate,
                     'enddate':enddate
                     }
        # TODO: build params conditionally based on whether we're doing period-style request
        # TODO: support start and end dates
        # Drop arguments that were not supplied (None values).
        validArgs = {}
        for k in list(arguments.keys()):
            if arguments[k] != None:
                validArgs[k] = arguments[k]
        params = urllib.parse.urlencode(validArgs)
        logging.getLogger("requests").setLevel(logging.WARNING)
        conn = http.client.HTTPSConnection('apis.tdameritrade.com')
        conn.set_debuglevel(0)
        conn.request('GET', '/apps/100/PriceHistory?'+params)
        response = conn.getresponse()
        if response.status != 200:
            #import pdb; pdb.set_trace()
            raise ValueError(response.reason)
        data = response.read()
        conn.close()
        # The first 15 bytes are the header
        # DATA          TYPE    DESCRIPTION
        # 00 00 00 01   4 bytes Symbol Count =1
        # 00 04         2 bytes Symbol Length = 4
        # 41 4D 54 44   4 bytes Symbol = AMTD
        # 00            1 byte  Error code = 0 (OK)
        # 00 00 00 02   4 bytes Bar Count = 2
        cursor = 0
        symbolCount = unpack('>i', data[0:4])[0]
        if symbolCount > 1:
            # More than one symbol is unexpected for a single-symbol
            # request; dump the raw payload for offline analysis.
            fp = open('tdapi_debug_dump','wb')
            fp.write(data)
            fp.close()
            raise ValueError('Error - see tdapi_debug_dump')
        symbolLength = unpack('>h', data[4:6])[0]
        cursor = 6
        symbol = data[cursor:cursor+symbolLength]
        cursor += symbolLength
        error = unpack('b', data[cursor:cursor+1])[0]
        cursor += 1
        # If there is an error, there will be an error length and corresponding error text
        if error != 0:
            errorLength = unpack('>h', data[cursor:cursor+2])[0]
            # TODO: verify that this is correct below -- advance cursor for error length
            cursor += 2
            if errorLength > 0:
                errorText = data[cursor:cursor+errorLength]
                cursor += errorLength
                raise ValueError('[getPriceHistory] Error: %s' % errorText)
        barCount = unpack('>i', data[cursor:cursor+4])[0]
        cursor += 4
        # TODO: Add more rigorous checks on header data
        # Now we need to extract the bars: 5 big-endian floats (close,
        # high, low, open, volume/100) followed by an 8-byte ms-epoch
        # timestamp, 28 bytes per bar.
        bars = []
        for i in range(barCount):
            # Make sure we still have enough data for a bar and a terminator (note only one terminator at the end)
            if cursor + 28 > len(data):
                raise ValueError('Trying to read %d bytes from %d total!' % (cursor+58, len(data)))
            C = unpack('>f', data[cursor:cursor+4])[0]
            cursor += 4
            H = unpack('>f', data[cursor:cursor+4])[0]
            cursor += 4
            L = unpack('>f', data[cursor:cursor+4])[0]
            cursor += 4
            O = unpack('>f', data[cursor:cursor+4])[0]
            cursor += 4
            V = unpack('>f', data[cursor:cursor+4])[0] * 100.0
            cursor += 4
            #T = time.gmtime(float(unpack('>Q',data[cursor:cursor+8])[0]) / 1000.0) # Returned in ms since the epoch
            T = datetime.datetime.utcfromtimestamp(float(unpack('>Q',data[cursor:cursor+8])[0]) / 1000.0) # Returned in ms since the epoch
            cursor += 8
            bars.append((O,H,L,C,V,T))
        # Finally we should see a terminator of FF
        if data[cursor:cursor+2] != b'\xff\xff':
            fp = open('tdapi_debug_dump','wb')
            fp.write(data)
            fp.close()
            raise ValueError('Did not find terminator at hexdata[%d]! See tdapi_debug_dump' % cursor)
        df = pandas.DataFrame(data=bars, columns=['open','high','low','close','volume','timestamp'])
        return df
| mit |
RacerXx/GoAtThrottleUp | ServerRelay/cherrypy/test/test_json.py | 12 | 2497 | import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import json
class JsonTest(helper.CPWebCase):
    """Functional tests for CherryPy's json_in / json_out tools."""

    def setup_server():
        # Build the test application: one handler per serialization case.
        class Root(object):

            def plain(self):
                # Baseline handler with no JSON tool applied.
                return 'hello'
            plain.exposed = True

            def json_string(self):
                return 'hello'
            json_string.exposed = True
            json_string._cp_config = {'tools.json_out.on': True}

            def json_list(self):
                return ['a', 'b', 42]
            json_list.exposed = True
            json_list._cp_config = {'tools.json_out.on': True}

            def json_dict(self):
                return {'answer': 42}
            json_dict.exposed = True
            json_dict._cp_config = {'tools.json_out.on': True}

            def json_post(self):
                # tools.json_in parses the request body into request.json.
                if cherrypy.request.json == [13, 'c']:
                    return 'ok'
                else:
                    return 'nok'
            json_post.exposed = True
            json_post._cp_config = {'tools.json_in.on': True}

        root = Root()
        cherrypy.tree.mount(root)
    setup_server = staticmethod(setup_server)

    def test_json_output(self):
        # json_out must serialize strings, lists and dicts to JSON.
        if json is None:
            self.skip("json not found ")
            return

        self.getPage("/plain")
        self.assertBody("hello")

        self.getPage("/json_string")
        self.assertBody('"hello"')

        self.getPage("/json_list")
        self.assertBody('["a", "b", 42]')

        self.getPage("/json_dict")
        self.assertBody('{"answer": 42}')

    def test_json_input(self):
        # json_in must accept application/json, reject other content
        # types (415) and malformed documents (400).
        if json is None:
            self.skip("json not found ")
            return

        body = '[13, "c"]'
        headers = [('Content-Type', 'application/json'),
                   ('Content-Length', str(len(body)))]
        self.getPage("/json_post", method="POST", headers=headers, body=body)
        self.assertBody('ok')

        body = '[13, "c"]'
        headers = [('Content-Type', 'text/plain'),
                   ('Content-Length', str(len(body)))]
        self.getPage("/json_post", method="POST", headers=headers, body=body)
        self.assertStatus(415, 'Expected an application/json content type')

        body = '[13, -]'
        headers = [('Content-Type', 'application/json'),
                   ('Content-Length', str(len(body)))]
        self.getPage("/json_post", method="POST", headers=headers, body=body)
        self.assertStatus(400, 'Invalid JSON document')
| mit |
alexmandujano/django | django/contrib/formtools/tests/wizard/namedwizardtests/tests.py | 127 | 15125 | from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.views import (NamedUrlSessionWizardView,
NamedUrlCookieWizardView)
from django.contrib.formtools.tests.wizard.test_forms import get_request, Step1, Step2
class NamedWizardTests(object):
    """Mixin exercising the named-URL form wizard (one URL per step).

    Concrete subclasses supply ``wizard_urlname``, ``wizard_step_1_data``
    and ``wizard_step_data`` for a specific storage backend
    (session- or cookie-based).
    """
    urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'

    def setUp(self):
        # form1 has a user field; point it at a real User row.
        self.testuser, created = User.objects.get_or_create(username='testuser1')
        self.wizard_step_data[0]['form1-user'] = self.testuser.pk

    def test_initial_call(self):
        # The start URL redirects to the first step and seeds the wizard
        # context (current/prev/next/count bookkeeping).
        response = self.client.get(reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form1')
        self.assertEqual(wizard['steps'].step0, 0)
        self.assertEqual(wizard['steps'].step1, 1)
        self.assertEqual(wizard['steps'].last, 'form4')
        self.assertEqual(wizard['steps'].prev, None)
        self.assertEqual(wizard['steps'].next, 'form2')
        self.assertEqual(wizard['steps'].count, 4)
        self.assertEqual(wizard['url_name'], self.wizard_urlname)

    def test_initial_call_with_params(self):
        # GET parameters on the start URL must survive the redirect.
        get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'}
        response = self.client.get(reverse('%s_start' % self.wizard_urlname),
                                   get_params)
        self.assertEqual(response.status_code, 302)

        # Test for proper redirect GET parameters
        location = response.url
        self.assertNotEqual(location.find('?'), -1)
        querydict = QueryDict(location[location.find('?') + 1:])
        self.assertEqual(dict(querydict.items()), get_params)

    def test_form_post_error(self):
        # Posting incomplete data re-renders the same step with errors.
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_1_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        self.assertEqual(response.context['wizard']['form'].errors,
                         {'name': ['This field is required.'],
                          'user': ['This field is required.']})

    def test_form_post_success(self):
        # A valid post advances the wizard to the next step.
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form2')
        self.assertEqual(wizard['steps'].step0, 1)
        self.assertEqual(wizard['steps'].prev, 'form1')
        self.assertEqual(wizard['steps'].next, 'form3')

    def test_form_stepback(self):
        # wizard_goto_step navigates back to the previous step.
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')

        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')

        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={
                'step': response.context['wizard']['steps'].current
            }), {'wizard_goto_step': response.context['wizard']['steps'].prev})
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')

    def test_form_jump(self):
        # Visiting a later step's URL directly jumps the wizard there.
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')

        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form3'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')

    def test_form_finish(self):
        # Walk all four steps (including the file upload on form2) and
        # check the final merged form data.
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')

        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')

        post_data = self.wizard_step_data[1]
        post_data['form2-file1'].close()
        post_data['form2-file1'] = open(__file__, 'rb')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')

        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form4')

        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)

        all_data = response.context['form_list']
        with open(__file__, 'rb') as f:
            self.assertEqual(all_data[1]['file1'].read(), f.read())
        all_data[1]['file1'].close()
        del all_data[1]['file1']
        self.assertEqual(all_data, [
            {'name': 'Pony', 'thirsty': True, 'user': self.testuser},
            {'address1': '123 Main St', 'address2': 'Djangoland'},
            {'random_crap': 'blah blah'},
            [{'random_crap': 'blah blah'}, {'random_crap': 'blah blah'}]])

    def test_cleaned_data(self):
        # Like test_form_finish, but checks all_cleaned_data and that
        # the stored upload can be re-read when revisiting form2.
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        post_data['form2-file1'] = open(__file__, 'rb')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)

        step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'})
        response = self.client.get(step2_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        with open(__file__, 'rb') as f:
            self.assertEqual(
                response.context['wizard']['form'].files['form2-file1'].read(),
                f.read())

        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)

        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)

        all_data = response.context['all_cleaned_data']
        with open(__file__, 'rb') as f:
            self.assertEqual(all_data['file1'].read(), f.read())
        all_data['file1'].close()
        del all_data['file1']
        self.assertEqual(
            all_data,
            {'name': 'Pony', 'thirsty': True, 'user': self.testuser,
             'address1': '123 Main St', 'address2': 'Djangoland',
             'random_crap': 'blah blah', 'formset-form4': [
                 {'random_crap': 'blah blah'},
                 {'random_crap': 'blah blah'}
             ]})

    def test_manipulated_data(self):
        # Dropping the storage cookies mid-run must restart the wizard
        # at step one instead of completing with stale data.
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        post_data['form2-file1'].close()
        post_data['form2-file1'] = open(__file__, 'rb')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)

        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        loc = response.url
        response = self.client.get(loc)
        self.assertEqual(response.status_code, 200, loc)

        # Simulate a tampering/expired client: remove both backends' cookies.
        self.client.cookies.pop('sessionid', None)
        self.client.cookies.pop('wizard_cookie_contact_wizard', None)

        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')

    def test_form_reset(self):
        # The ?reset=1 query parameter restarts the wizard at step one.
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')

        response = self.client.get(
            '%s?reset=1' % reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)

        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
@skipIfCustomUser
class NamedSessionWizardTests(NamedWizardTests, TestCase):
    # Runs the NamedWizardTests suite against the session-backed wizard.
    wizard_urlname = 'nwiz_session'
    wizard_step_1_data = {
        'session_contact_wizard-current_step': 'form1',
    }
    # POST payloads for steps form1..form4; form2's file field is opened
    # by the individual tests.
    wizard_step_data = (
        {
            'form1-name': 'Pony',
            'form1-thirsty': '2',
            'session_contact_wizard-current_step': 'form1',
        },
        {
            'form2-address1': '123 Main St',
            'form2-address2': 'Djangoland',
            'session_contact_wizard-current_step': 'form2',
        },
        {
            'form3-random_crap': 'blah blah',
            'session_contact_wizard-current_step': 'form3',
        },
        {
            'form4-INITIAL_FORMS': '0',
            'form4-TOTAL_FORMS': '2',
            'form4-MAX_NUM_FORMS': '0',
            'form4-0-random_crap': 'blah blah',
            'form4-1-random_crap': 'blah blah',
            'session_contact_wizard-current_step': 'form4',
        }
    )
@skipIfCustomUser
class NamedCookieWizardTests(NamedWizardTests, TestCase):
    # Runs the NamedWizardTests suite against the cookie-backed wizard.
    wizard_urlname = 'nwiz_cookie'
    wizard_step_1_data = {
        'cookie_contact_wizard-current_step': 'form1',
    }
    # POST payloads for steps form1..form4; form2's file field is opened
    # by the individual tests.
    wizard_step_data = (
        {
            'form1-name': 'Pony',
            'form1-thirsty': '2',
            'cookie_contact_wizard-current_step': 'form1',
        },
        {
            'form2-address1': '123 Main St',
            'form2-address2': 'Djangoland',
            'cookie_contact_wizard-current_step': 'form2',
        },
        {
            'form3-random_crap': 'blah blah',
            'cookie_contact_wizard-current_step': 'form3',
        },
        {
            'form4-INITIAL_FORMS': '0',
            'form4-TOTAL_FORMS': '2',
            'form4-MAX_NUM_FORMS': '0',
            'form4-0-random_crap': 'blah blah',
            'form4-1-random_crap': 'blah blah',
            'cookie_contact_wizard-current_step': 'form4',
        }
    )
class NamedFormTests(object):
    """Mixin exercising a named-URL wizard view directly, without the
    test client."""
    urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'
    def test_revalidation(self):
        # Rendering the "done" step must revalidate and rewind the wizard's
        # storage back to the first step.
        request = get_request()
        view = self.formwizard_class.as_view(
            [('start', Step1), ('step2', Step2)],
            url_name=self.wizard_urlname)
        response, wizard = view(request, step='done')
        wizard.render_done(None)
        self.assertEqual(wizard.storage.current_step, 'start')
class TestNamedUrlSessionWizardView(NamedUrlSessionWizardView):
    """Session wizard whose dispatch also hands back the view instance so
    tests can inspect wizard internals after the request completes."""
    def dispatch(self, request, *args, **kwargs):
        parent_dispatch = super(TestNamedUrlSessionWizardView, self).dispatch
        return (parent_dispatch(request, *args, **kwargs), self)
class TestNamedUrlCookieWizardView(NamedUrlCookieWizardView):
    """Cookie wizard whose dispatch also hands back the view instance so
    tests can inspect wizard internals after the request completes."""
    def dispatch(self, request, *args, **kwargs):
        parent_dispatch = super(TestNamedUrlCookieWizardView, self).dispatch
        return (parent_dispatch(request, *args, **kwargs), self)
@skipIfCustomUser
class NamedSessionFormTests(NamedFormTests, TestCase):
    # Direct-view revalidation tests against the session-backed wizard.
    formwizard_class = TestNamedUrlSessionWizardView
    wizard_urlname = 'nwiz_session'
@skipIfCustomUser
class NamedCookieFormTests(NamedFormTests, TestCase):
    # Direct-view revalidation tests against the cookie-backed wizard.
    formwizard_class = TestNamedUrlCookieWizardView
    wizard_urlname = 'nwiz_cookie'
| bsd-3-clause |
jungker/leetcode-python | remove_duplicates_sorted_listII.py | 1 | 1207 | # Definition for singly-linked list.
class ListNode(object):
    """A node in a singly-linked list."""
    def __init__(self, x):
        # Payload value; the tail pointer starts out detached.
        self.val = x
        self.next = None
def printListNode(p):
    # Debug helper: walk the list starting at p and print every value on one
    # space-separated line (the Python 2 trailing comma suppresses the
    # newline), then emit the final newline.
    while p:
        print p.val,
        p = p.next
    print
class Solution(object):
    def deleteDuplicates(self, head):
        """Drop every value that occurs more than once in a sorted list,
        keeping only values that appear exactly once.

        :type head: ListNode
        :rtype: ListNode
        """
        if not head or not head.next:
            return head
        sentinel = ListNode(0)
        tail = sentinel
        cur, peek = head, head.next
        while peek:
            if cur.val == peek.val:
                # Skip the entire run of equal values.
                while cur.next and cur.val == cur.next.val:
                    cur = cur.next
            else:
                # cur is unique so far; link it onto the result.
                tail.next = cur
                tail = tail.next
            cur = cur.next
            peek = cur.next if cur else None
        # cur is either None or the last (unique) node; terminate the result.
        tail.next = cur
        return sentinel.next
if __name__ == '__main__':
    # Build the sorted list 1->2->3->3->4->4->5, strip every repeated value,
    # and print the survivors (expected output: 1 2 5).
    dummy = ListNode(0)
    tail = dummy
    for value in [1, 2, 3, 3, 4, 4, 5]:
        tail.next = ListNode(value)
        tail = tail.next
    printListNode(Solution().deleteDuplicates(dummy.next))
| mit |
ee08b397/LeetCode-4 | 097 Interleaving String.py | 2 | 2690 | """
Given s1, s2, s3, find whether s3 is formed by the interleaving of s1 and s2.
For example,
Given:
s1 = "aabcc",
s2 = "dbbca",
When s3 = "aadbbcbcac", return true.
When s3 = "aadbbbaccc", return false.
"""
__author__ = 'Danyang'
class Solution:
    def isInterleave_TLE(self, s1, s2, s3):
        """Plain DFS reference implementation (exponential; TLE on the judge).

        Fixes over the original:
        - recurses into this method itself rather than delegating to the DP
          version (a leftover from a rename)
        - an empty s3 is an interleaving only when s1 and s2 are also
          exhausted (the original returned True unconditionally, so e.g.
          ("a", "", "") was wrongly accepted)

        :param s1: first source string
        :param s2: second source string
        :param s3: candidate interleaving
        :return: True iff s3 is an interleaving of s1 and s2
        """
        if not s3:
            return not s1 and not s2
        letter = s3[0]
        # Consume the next character of s3 from whichever source matches.
        if s1 and s1[0] == letter:
            if self.isInterleave_TLE(s1[1:], s2, s3[1:]):
                return True
        if s2 and s2[0] == letter:
            if self.isInterleave_TLE(s1, s2[1:], s3[1:]):
                return True
        return False
    def isInterleave(self, s1, s2, s3):
        """Dynamic programming.

        dp[i][j] is True when s3[:i+j] can be formed by interleaving s1[:i]
        with s2[:j]:
            dp[i][j] = (dp[i-1][j] and s3[i+j-1] == s1[i-1])
                    or (dp[i][j-1] and s3[i+j-1] == s2[j-1])

        Example (s1="aabcc", s2="dbbca", s3="aadbbcbcac"):
              -  d  b  b  c  a
           -  T  F  F  F  F  F
           a  T  F  F  F  F  F
           a  T  T  T  T  T  F
           b  F  T  T  F  T  F
           c  F  F  T  T  T  T
           c  F  F  F  T  F  T

        :type s1: str
        :type s2: str
        :type s3: str
        :rtype: bool
        """
        m = len(s1)
        n = len(s2)
        # A length mismatch can never interleave.
        if m + n != len(s3):
            return False
        dp = [[False for _ in range(n + 1)] for _ in range(m + 1)]
        # Boundary conditions: prefixes of s3 drawn from a single source.
        dp[0][0] = True
        for i in range(1, m + 1):
            dp[i][0] = dp[i - 1][0] and s3[i - 1] == s1[i - 1]
        for j in range(1, n + 1):
            dp[0][j] = dp[0][j - 1] and s3[j - 1] == s2[j - 1]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                dp[i][j] = ((dp[i - 1][j] and s3[i + j - 1] == s1[i - 1]) or
                            (dp[i][j - 1] and s3[i + j - 1] == s2[j - 1]))
        return dp[-1][-1]
if __name__=="__main__":
    # Smoke tests executed when the module is run as a script.
    assert Solution().isInterleave("aa", "ab", "abaa")==True
    assert Solution().isInterleave("aabcc", "dbbca", "aadbbcbcac")==True
assert Solution().isInterleave("aabcc", "dbbca", "aadbbbaccc")==False | mit |
libscie/liberator | liberator/lib/python3.6/site-packages/pip/_vendor/requests/__init__.py | 327 | 2326 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.11.1'
__build__ = 0x021101
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
# Note: Patched by pip to prevent using the PyOpenSSL module. On Windows this
# prevents upgrading cryptography.
# try:
# from .packages.urllib3.contrib import pyopenssl
# pyopenssl.inject_into_urllib3()
# except ImportError:
# pass
import warnings
# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Python 2.6 and earlier lack logging.NullHandler; define an equivalent
    # handler that silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            # Intentionally a no-op.
            pass
# Attach the no-op handler so importing the package never triggers the
# "No handler found" warning when the application hasn't configured logging.
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
| cc0-1.0 |
youssef-poisson/angular | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 426 | 120645 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects.  Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each.  These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier only is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes.  In 2.6, sha is deprecated.  Import hashlib if
# available, avoiding a deprecation warning under 2.6.  Import sha otherwise,
# preserving 2.4 compatibility.
try:
  import hashlib
  # Callable returning a fresh SHA-1 hash object; used by XCObject.ComputeIDs.
  _new_sha1 = hashlib.sha1
except ImportError:
  import sha
  _new_sha1 = sha.new
# See XCObject._EncodeString.  This pattern is used to determine when a string
# can be printed unquoted.  Strings that match this pattern may be printed
# unquoted.  Strings that do not match must be quoted and may be further
# transformed to be properly encoded.  Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString.  See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
  """Given input_path, returns a tuple with sourceTree and path values.

  Examples:
    input_path     (source_tree, output_path)
    '$(VAR)/path'  ('VAR', 'path')
    '$(VAR)'       ('VAR', None)
    'path'         (None, 'path')
  """
  match = _path_leading_variable.match(input_path)
  if not match:
    # No leading $(VAR): the entire string is the path.
    return (None, input_path)
  # group(3) is the portion after the slash; it is None for a bare '$(VAR)'.
  return (match.group(1), match.group(3))
def ConvertVariablesToShellSyntax(input_string):
  """Rewrites every make-style $(VAR) reference as shell-style ${VAR}."""
  def _to_shell(match):
    return '${%s}' % match.group(1)
  return re.sub(r'\$\((.*?)\)', _to_shell, input_string)
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
        default: Optional.  If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
  # Per-class property schema; subclasses override.  See the class docstring
  # for the [is_list, property_type, is_strong, is_required, default] format.
  _schema = {}
  # Subclasses that print in the compact single-line format set this to True.
  _should_print_single_line = False
  # See _EncodeString.  Indexed by character ordinal; value is the escaped
  # representation.  Start with "\U00xx" escapes for all of 0..31, then patch
  # in the short C-style escapes used by Xcode.
  _encode_transforms = []
  i = 0
  while i < ord(' '):
    _encode_transforms.append('\\U%04x' % i)
    i = i + 1
  _encode_transforms[7] = '\\a'
  _encode_transforms[8] = '\\b'
  _encode_transforms[9] = '\\t'
  _encode_transforms[10] = '\\n'
  _encode_transforms[11] = '\\v'
  _encode_transforms[12] = '\\f'
  # CR is rendered as "\n", indistinguishable from LF in the output.
  _encode_transforms[13] = '\\n'
  # Alternate table for single-line objects: HT, LF, and VT pass through
  # unescaped.
  _alternate_encode_transforms = list(_encode_transforms)
  _alternate_encode_transforms[9] = chr(9)
  _alternate_encode_transforms[10] = chr(10)
  _alternate_encode_transforms[11] = chr(11)
  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the object; see the class docstring for attribute roles."""
    self.id = id
    self.parent = parent
    self._properties = {}
    self._hashables = []
    # Schema defaults are applied before the caller's properties are merged,
    # presumably so explicit values win — see _SetDefaultsFromSchema.
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
  def Copy(self):
    """Make a copy of this object.

    The new object will have its own copy of lists and dicts.  Any XCObject
    objects owned by this object (marked "strong") will be copied in the
    new object, even those found in lists.  If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """
    # The copy starts with the same id and parent; each property is then
    # cloned or shared below according to its schema entry.
    that = self.__class__(id=self.id, parent=self.parent)
    for key, value in self._properties.iteritems():
      # _schema[key][2] is the is_strong flag: strong means this object owns
      # the value.
      is_strong = self._schema[key][2]
      if isinstance(value, XCObject):
        if is_strong:
          # Owned child object: deep-copy and re-parent it to the copy.
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          # Weak reference: share the same object without copying.
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        # Immutable scalars can be shared directly.
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
          that._properties[key] = []
          for item in value:
            new_item = item.Copy()
            new_item.parent = that
            that._properties[key].append(new_item)
        else:
          # Weak list: shallow-copy the list itself, sharing its elements.
          that._properties[key] = value[:]
      elif isinstance(value, dict):
        # dicts are never strong.
        if is_strong:
          raise TypeError('Strong dict for key ' + key + ' in ' + \
                          self.__class__.__name__)
        else:
          that._properties[key] = value.copy()
      else:
        raise TypeError('Unexpected type ' + value.__class__.__name__ + \
                        ' for key ' + key + ' in ' + self.__class__.__name__)
    return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
  def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
    """Set "id" properties deterministically.

    An object's "id" property is set based on a hash of its class type and
    name, as well as the class type and name of all ancestor objects.  As
    such, it is only advisable to call ComputeIDs once an entire project file
    tree is built.

    If recursive is True, recurse into all descendant objects and update their
    hashes.

    If overwrite is True, any existing value set in the "id" property will be
    replaced.

    seed_hash carries the ancestry hash down the recursion; callers normally
    leave it None at the root.
    """
    def _HashUpdate(hash, data):
      """Update hash with data's length and contents.

      If the hash were updated only with the value of data, it would be
      possible for clowns to induce collisions by manipulating the names of
      their objects.  By adding the length, it's exceedingly less likely that
      ID collisions will be encountered, intentionally or not.
      """
      hash.update(struct.pack('>i', len(data)))
      hash.update(data)
    if seed_hash is None:
      seed_hash = _new_sha1()
    hash = seed_hash.copy()
    hashables = self.Hashables()
    assert len(hashables) > 0
    for hashable in hashables:
      _HashUpdate(hash, hashable)
    if recursive:
      # Children either continue this object's hash chain, or restart from
      # the seed with the values HashablesForChild supplies.
      hashables_for_child = self.HashablesForChild()
      if hashables_for_child is None:
        child_hash = hash
      else:
        assert len(hashables_for_child) > 0
        child_hash = seed_hash.copy()
        for hashable in hashables_for_child:
          _HashUpdate(child_hash, hashable)
      for child in self.Children():
        child.ComputeIDs(recursive, overwrite, child_hash)
    if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits.  Instead of throwing out 64 bits of the digest, xor them
      # into the portion that gets used.
      assert hash.digest_size % 4 == 0
      # NOTE: integer division under Python 2 (this module targets Python 2).
      digest_int_count = hash.digest_size / 4
      digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
      id_ints = [0, 0, 0]
      for index in xrange(0, digest_int_count):
        id_ints[index % 3] ^= digest_ints[index]
      self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError(
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name()))
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
# the _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
  def _EncodeString(self, value):
    """Encodes a string to be placed in the project file output, mimicking
    Xcode behavior.
    """
    # Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), and _ (underscore) is present.  Also use
    # quotation marks to represent empty strings.
    #
    # Escape " (double-quote) and \ (backslash) by preceding them with a
    # backslash.
    #
    # Some characters below the printable ASCII range are encoded specially:
    #     7 ^G BEL is encoded as "\a"
    #     8 ^H BS  is encoded as "\b"
    #    11 ^K VT  is encoded as "\v"
    #    12 ^L NP  is encoded as "\f"
    #   127 ^? DEL is passed through as-is without escaping
    #  - In PBXFileReference and PBXBuildFile objects:
    #     9 ^I HT  is passed through as-is without escaping
    #    10 ^J NL  is passed through as-is without escaping
    #    13 ^M CR  is passed through as-is without escaping
    #  - In other objects:
    #     9 ^I HT  is encoded as "\t"
    #    10 ^J NL  is encoded as "\n"
    #    13 ^M CR  is encoded as "\n" rendering it indistinguishable from
    #              10 ^J NL
    # All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
    # in hexadecimal.  For example, character 14 (^N SO) is encoded as "\U000e".
    # Characters above the ASCII range are passed through to the output encoded
    # as UTF-8 without any escaping.  These mappings are contained in the
    # class' _encode_transforms list.
    if _unquoted.search(value) and not _quoted.search(value):
      return value
    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
  def _XCPrintableValue(self, tabs, value, flatten_list=False):
    """Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.

    _XCPrintableValue can handle str and int values, XCObjects (which are
    made printable by returning their id property), and list and dict objects
    composed of any of the above types.  When printing a list or dict, and
    _should_print_single_line is False, the tabs parameter is used to determine
    how much to indent the lines corresponding to the items in the list or
    dict.

    If flatten_list is True, single-element lists will be transformed into
    strings.
    """
    printable = ''
    comment = None
    if self._should_print_single_line:
      sep = ' '
      element_tabs = ''
      end_tabs = ''
    else:
      sep = '\n'
      element_tabs = '\t' * (tabs + 1)
      end_tabs = '\t' * tabs
    if isinstance(value, XCObject):
      # Objects print as their 24-character ID; the comment is appended below.
      printable += value.id
      comment = value.Comment()
    elif isinstance(value, str):
      printable += self._EncodeString(value)
    elif isinstance(value, unicode):
      # Project files are UTF-8; encode before escaping.
      printable += self._EncodeString(value.encode('utf-8'))
    elif isinstance(value, int):
      printable += str(value)
    elif isinstance(value, list):
      if flatten_list and len(value) <= 1:
        # Empty and single-element lists print as plain (encoded) strings.
        if len(value) == 0:
          printable += self._EncodeString('')
        else:
          printable += self._EncodeString(value[0])
      else:
        printable = '(' + sep
        for item in value:
          printable += element_tabs + \
                       self._XCPrintableValue(tabs + 1, item, flatten_list) + \
                       ',' + sep
        printable += end_tabs + ')'
    elif isinstance(value, dict):
      printable = '{' + sep
      # Keys are emitted in sorted order, matching Xcode's output.
      for item_key, item_value in sorted(value.iteritems()):
        printable += element_tabs + \
            self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
            self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
            sep
      printable += end_tabs + '}'
    else:
      raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
    if comment != None:
      printable += ' ' + self._EncodeComment(comment)
    return printable
  def _XCKVPrint(self, file, tabs, key, value):
    """Prints a key and value, members of an XCObject's _properties dictionary,
    to file.

    tabs is an int identifying the indentation level.  If the class'
    _should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
    """
    if self._should_print_single_line:
      printable = ''
      after_kv = ' '
    else:
      printable = '\t' * tabs
      after_kv = '\n'
    # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
    # objects without comments.  Sometimes it prints them with comments, but
    # the majority of the time, it doesn't.  To avoid unnecessary changes to
    # the project file after Xcode opens it, don't write comments for
    # remoteGlobalIDString.  This is a sucky hack and it would certainly be
    # cleaner to extend the schema to indicate whether or not a comment should
    # be printed, but since this is the only case where the problem occurs and
    # Xcode itself can't seem to make up its mind, the hack will suffice.
    #
    # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
    if key == 'remoteGlobalIDString' and isinstance(self,
                                                    PBXContainerItemProxy):
      value_to_print = value.id
    else:
      value_to_print = value
    # PBXBuildFile's settings property is represented in the output as a dict,
    # but a hack here has it represented as a string.  Arrange to strip off the
    # quotes so that it shows up in the output as expected.
    if key == 'settings' and isinstance(self, PBXBuildFile):
      strip_value_quotes = True
    else:
      strip_value_quotes = False
    # In another one-off, let's set flatten_list on buildSettings properties
    # of XCBuildConfiguration objects, because that's how Xcode treats them.
    if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
      flatten_list = True
    else:
      flatten_list = False
    try:
      printable_key = self._XCPrintableValue(tabs, key, flatten_list)
      printable_value = self._XCPrintableValue(tabs, value_to_print,
                                               flatten_list)
      if strip_value_quotes and len(printable_value) > 1 and \
         printable_value[0] == '"' and printable_value[-1] == '"':
        printable_value = printable_value[1:-1]
      printable += printable_key + ' = ' + printable_value + ';' + after_kv
    except TypeError, e:
      # Annotate the failure with the offending key before propagating.
      gyp.common.ExceptionAppend(e,
                                 'while printing key "%s"' % key)
      raise
    self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
  """Prints a representation of this object to file, adhering to Xcode output
  formatting.

  Args:
    file: Open file-like object to receive the output; defaults to
        sys.stdout.

  Raises:
    KeyError: a schema-required property is unset (via
        VerifyHasRequiredProperties).
  """
  self.VerifyHasRequiredProperties()

  if self._should_print_single_line:
    # When printing an object in a single line, Xcode doesn't put any space
    # between the beginning of a dictionary (or presumably a list) and the
    # first contained item, so you wind up with snippets like
    #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
    # If it were me, I would have put a space in there after the opening
    # curly, but I guess this is just another one of those inconsistencies
    # between how Xcode prints PBXFileReference and PBXBuildFile objects as
    # compared to other objects.  Mimic Xcode's behavior here by using an
    # empty string for sep.
    sep = ''
    end_tabs = 0
  else:
    sep = '\n'
    end_tabs = 2

  # Start the object.  For example, '\t\tPBXProject = {\n'.
  self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)

  # "isa" isn't in the _properties dictionary, it's an intrinsic property
  # of the class which the object belongs to.  Xcode always outputs "isa"
  # as the first element of an object dictionary.
  self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)

  # The remaining elements of an object dictionary are sorted alphabetically.
  # NOTE: dict.iteritems is Python 2 only.
  for property, value in sorted(self._properties.iteritems()):
    self._XCKVPrint(file, 3, property, value)

  # End the object.
  self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
  """Merge the supplied properties into the _properties dictionary.

  The input properties must adhere to the class schema or a KeyError or
  TypeError exception will be raised.  If adding an object of an XCObject
  subclass and the schema indicates a strong relationship, the object's
  parent will be set to this object.

  If do_copy is True, then lists, dicts, strong-owned XCObjects, and
  strong-owned XCObjects in lists will be copied instead of having their
  references added.

  Args:
    properties: dict mapping property names to values; None is a no-op.
    do_copy: when True, copy container values and strong-owned objects
        instead of aliasing the caller's references.

  Raises:
    KeyError: a property name is not present in the class schema.
    TypeError: a value (or list item) does not match the schema's declared
        type, or do_copy is requested for an uncopyable type.
  """
  if properties is None:
    return

  for property, value in properties.iteritems():
    # Make sure the property is in the schema.
    if not property in self._schema:
      raise KeyError(property + ' not in ' + self.__class__.__name__)

    # Make sure the property conforms to the schema.
    (is_list, property_type, is_strong) = self._schema[property][0:3]
    if is_list:
      if value.__class__ != list:
        raise TypeError(
              property + ' of ' + self.__class__.__name__ + \
              ' must be list, not ' + value.__class__.__name__)
      for item in value:
        if not isinstance(item, property_type) and \
           not (item.__class__ == unicode and property_type == str):
          # Accept unicode where str is specified.  str is treated as
          # UTF-8-encoded.
          raise TypeError(
                'item of ' + property + ' of ' + self.__class__.__name__ + \
                ' must be ' + property_type.__name__ + ', not ' + \
                item.__class__.__name__)
    elif not isinstance(value, property_type) and \
         not (value.__class__ == unicode and property_type == str):
      # Accept unicode where str is specified.  str is treated as
      # UTF-8-encoded.
      raise TypeError(
            property + ' of ' + self.__class__.__name__ + ' must be ' + \
            property_type.__name__ + ', not ' + value.__class__.__name__)

    # Checks passed, perform the assignment.
    if do_copy:
      if isinstance(value, XCObject):
        # Strong-owned XCObjects are deep-copied; weak references are shared.
        if is_strong:
          self._properties[property] = value.Copy()
        else:
          self._properties[property] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        # Immutable scalars are safe to share even when copying.
        self._properties[property] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe
          # to call Copy.
          self._properties[property] = []
          for item in value:
            self._properties[property].append(item.Copy())
        else:
          # Shallow-copy the list so the caller's list isn't aliased.
          self._properties[property] = value[:]
      elif isinstance(value, dict):
        self._properties[property] = value.copy()
      else:
        raise TypeError("Don't know how to copy a " + \
                        value.__class__.__name__ + ' object for ' + \
                        property + ' in ' + self.__class__.__name__)
    else:
      self._properties[property] = value

    # Set up the child's back-reference to this object.  Don't use |value|
    # any more because it may not be right if do_copy is true.
    if is_strong:
      if not is_list:
        self._properties[property].parent = self
      else:
        for item in self._properties[property]:
          item.parent = self
def HasProperty(self, key):
  """Returns True if the property named |key| is currently set."""
  properties = self._properties
  return key in properties
def GetProperty(self, key):
  """Returns the value of the property named |key|.

  Raises:
    KeyError: the property is not set.
  """
  properties = self._properties
  return properties[key]
def SetProperty(self, key, value):
  """Sets the property named |key| to |value|, funneling through
  UpdateProperties so schema validation applies."""
  update = {key: value}
  self.UpdateProperties(update)
def DelProperty(self, key):
  """Removes the property named |key| if present; a no-op when unset."""
  self._properties.pop(key, None)
def AppendProperty(self, key, value):
  """Appends |value| to the list-typed property |key|.

  |key| must be present in the schema and declared as a list, and |value|
  must be an instance of the schema's item type.  For strong (owning)
  properties, value's parent is set to this object.

  Raises:
    KeyError: |key| is not in the schema.
    TypeError: |key| is not a list property, or |value| has the wrong type.
  """
  # TODO(mark): Support ExtendProperty too (and make this call that)?

  # Validate against the schema before touching anything.
  if key not in self._schema:
    raise KeyError(key + ' not in ' + self.__class__.__name__)

  schema_entry = self._schema[key]
  is_list = schema_entry[0]
  property_type = schema_entry[1]
  is_strong = schema_entry[2]

  if not is_list:
    raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
  if not isinstance(value, property_type):
    raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
                    ' must be ' + property_type.__name__ + ', not ' + \
                    value.__class__.__name__)

  # A strong relationship makes this object the item's parent.
  if is_strong:
    value.parent = self

  # Create the backing list on first use, then store the item.
  self._properties.setdefault(key, []).append(value)
def VerifyHasRequiredProperties(self):
  """Ensure that all properties identified as required by the schema are
  set.

  Raises:
    KeyError: a schema-required property is missing from _properties.
  """
  # TODO(mark): A stronger verification mechanism is needed.  Some
  # subclasses need to perform validation beyond what the schema can enforce.
  for property, attributes in self._schema.iteritems():
    # Schema layout: [is_list, property_type, is_strong, is_required, ...].
    (is_list, property_type, is_strong, is_required) = attributes[0:4]
    if is_required and not property in self._properties:
      raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
  """Assign object default values according to the schema.  This will not
  overwrite properties that have already been set."""
  defaults = {}
  for property, attributes in self._schema.iteritems():
    (is_list, property_type, is_strong, is_required) = attributes[0:4]
    # A schema entry carries a default only when it has a fifth element;
    # defaults apply only to required properties that aren't already set.
    if is_required and len(attributes) >= 5 and \
       not property in self._properties:
      default = attributes[4]

      defaults[property] = default

  if len(defaults) > 0:
    # Use do_copy=True so that each new object gets its own copy of strong
    # objects, lists, and dicts.
    self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
  """Abstract base for PBXGroup and PBXFileReference.  Not represented in a
  project file.

  Supplies the name/path/sourceTree handling and the ordering rules shared
  by all objects that can appear in a project's group hierarchy.
  """

  # TODO(mark): Do name and path belong here?  Probably so.
  # If path is set and name is not, name may have a default value.  Name will
  # be set to the basename of path, if the basename of path is different from
  # the full value of path.  If path is already just a leaf name, name will
  # not be set.
  _schema = XCObject._schema.copy()
  _schema.update({
    'comments':       [0, str, 0, 0],
    'fileEncoding':   [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth':    [0, int, 0, 0],
    'lineEnding':     [0, int, 0, 0],
    'sourceTree':     [0, str, 0, 1, '<group>'],
    'tabWidth':       [0, int, 0, 0],
    'usesTabs':       [0, int, 0, 0],
    'wrapsLines':     [0, int, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the element, deriving a default "name" from "path" and
    pulling a leading Xcode variable out of "path" into "sourceTree"."""
    # super
    XCObject.__init__(self, properties, id, parent)
    if 'path' in self._properties and not 'name' in self._properties:
      path = self._properties['path']
      name = posixpath.basename(path)
      # Only set a default name when the path carries directory components;
      # a bare leaf name needs no separate "name" property.
      if name != '' and path != name:
        self.SetProperty('name', name)

    if 'path' in self._properties and \
       (not 'sourceTree' in self._properties or \
        self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree != None:
        self._properties['sourceTree'] = source_tree
      if path != None:
        self._properties['path'] = path
      if source_tree != None and path is None and \
         not 'name' in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree

  def Name(self):
    """Returns the display name: "name" if set, otherwise "path", otherwise
    None (only the root PBXGroup has neither)."""
    if 'name' in self._properties:
      return self._properties['name']
    elif 'path' in self._properties:
      return self._properties['path']
    else:
      # This happens in the case of the root PBXGroup.
      return None

  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.

    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if
    the hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would
    be sufficient to change f1's hash.

    To counteract this problem, hashables for all XCHierarchicalElements
    except for the main group (which has neither a name nor a path) are taken
    to be just the set of path components.  Because hashables are inherited
    from parents, this provides assurance that a/b/f1 has the same set of
    hashables whether its parent is b or a/b.

    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """
    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)

    hashables = []

    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])

    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path != None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)

    hashables.extend(self._hashables)

    return hashables

  def Compare(self, other):
    """Three-way comparison: groups sort ahead of files; within the same
    rank, objects compare by name."""
    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup:         'group',
      PBXVariantGroup:  'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]

    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      return cmp(self.Name(), other.Name())

    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1

  def CompareRootGroup(self, other):
    """Three-way comparison for direct children of a project's mainGroup,
    placing the well-known top-level groups in |order| before the rest."""
    # This function should be used only to compare direct children of the
    # containing PBXProject's mainGroup.  These groups should appear in the
    # listed order.
    # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
    # generator should have a way of influencing this list rather than having
    # to hardcode for the generator here.
    order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
             'Build']

    # If the groups aren't in the listed order, do a name comparison.
    # Otherwise, groups in the listed order should come before those that
    # aren't.
    self_name = self.Name()
    other_name = other.Name()
    self_in = isinstance(self, PBXGroup) and self_name in order
    # BUG FIX: |other_in| must be judged by |other|'s type; the original
    # tested isinstance(self, PBXGroup) here, making the comparator
    # asymmetric.
    other_in = isinstance(other, PBXGroup) and other_name in order
    if not self_in and not other_in:
      return self.Compare(other)
    if self_name in order and not other_name in order:
      return -1
    if other_name in order and not self_name in order:
      return 1

    # If both groups are in the listed order, go by the defined order.
    self_index = order.index(self_name)
    other_index = order.index(other_name)
    if self_index < other_index:
      return -1
    if self_index > other_index:
      return 1
    return 0

  def PathFromSourceTreeAndPath(self):
    """Returns sourceTree and path flattened into one string, rendering a
    non-"<group>" sourceTree as "$(...)"; None if neither contributes."""
    # Turn the object's sourceTree and path properties into a single flat
    # string of a form comparable to the path parameter.  If there's a
    # sourceTree property other than "<group>", wrap it in $(...) for the
    # comparison.
    components = []
    if self._properties['sourceTree'] != '<group>':
      components.append('$(' + self._properties['sourceTree'] + ')')
    if 'path' in self._properties:
      components.append(self._properties['path'])

    if len(components) > 0:
      return posixpath.join(*components)

    return None

  def FullPath(self):
    """Returns a full path to self relative to the project file, or relative
    to some other source tree."""
    # Start with self, and walk up the chain of parents prepending their
    # paths, if any, until no more parents are available (project-relative
    # path) or until a path relative to some source tree is found.
    xche = self
    path = None
    while isinstance(xche, XCHierarchicalElement) and \
          (path is None or \
           (not path.startswith('/') and not path.startswith('$'))):
      this_path = xche.PathFromSourceTreeAndPath()
      if this_path != None and path != None:
        path = posixpath.join(this_path, path)
      elif this_path != None:
        path = this_path
      xche = xche.parent

    return path
class PBXGroup(XCHierarchicalElement):
  """A group (folder) in the Xcode project hierarchy.

  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name':     [0, str,                   0, 0],
    'path':     [0, str,                   0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the group and indexes any children supplied via
    properties into the lookup dicts."""
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    """Returns the inherited hashables augmented with all child names."""
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable : a node could have two child PBXGroup sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    """Returns the hashables used when computing a child's id."""
    # To avoid a circular reference the hashables used to compute a child id
    # do not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    """Indexes child into the lookup dicts.

    Raises:
      ValueError: a child with the same path (or, for PBXVariantGroups, the
          same (name, path) key) is already indexed.
    """
    # Sets up this PBXGroup object's dicts to reference the child properly.
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError('Found multiple children with path ' + child_path)
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError('Found multiple PBXVariantGroup children with ' + \
                         'name ' + str(child_name) + ' and path ' + \
                         str(child_path))
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    """Appends child to this group and keeps the lookup dicts current."""
    # Callers should use this instead of calling
    # AppendProperty('children', child) directly because this function
    # maintains the group's dicts.
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    """Returns the first child whose Name() equals name, or None."""
    # This is not currently optimized with a dict as GetChildByPath is because
    # it has few callers.  Most callers probably want GetChildByPath.  This
    # function is only useful to get children that have names but no paths,
    # which is rare.  The children of the main group ("Source", "Products",
    # etc.) is pretty much the only case where this likely to come up.
    #
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    """Returns the child indexed under path, or None."""
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    """Returns the PBXReferenceProxy child whose remoteRef proxies
    remote_object, or None."""
    # This method is a little bit esoteric.  Given a remote_object, which
    # should be a PBXFileReference in another project file, this method will
    # return this group's PBXReferenceProxy object serving as a local proxy
    # for the remote PBXFileReference.
    #
    # This function might benefit from a dict optimization as GetChildByPath
    # for some workloads, but profiling shows that it's not currently a
    # problem.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a
    directory with an ".lproj" extension, this method returns a
    PBXVariantGroup containing the variant named by path, and possibly other
    variants.  For all other paths, a "normal" PBXFileReference will be
    returned.
    """
    # Adding or getting a directory?  Directories end with a trailing slash.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    # Normalize away redundant separators, then restore the directory marker.
    path = posixpath.normpath(path)
    if is_dir:
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
    # MainMenu.nib inside path/to, and give it a variant named Language.  In
    # this example, grandparent would be set to path/to and parent_root would
    # be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten
      # from this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)
        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off that
      # path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is already
    present as a child of this object, it is returned.  Otherwise, a new
    PBXVariantGroup with the correct properties is created, added as a child,
    and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """
    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take
    it over by making all of its children this object's children.

    This function will continue to take over only children when those
    children are groups.  If there are three PBXGroups representing a, b, and
    c, with c inside b and b inside a, and a and b have no other children,
    this will result in a taking over both b and c, forming a PBXGroup for
    a/b/c.

    If recurse is True, this function will recurse into children and ask them
    to collapse themselves by taking over only children as well.  Assuming
    an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
    (d1, d2, and f are files, the rest are groups), recursion will result in
    a group for a/b/c containing a group for d3/e.
    """
    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same way:
    # reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative to
        # their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the
      # original parent didn't have a name but the child did, let the child's
      # name live on.  If the name attribute seems unnecessary now, get rid
      # of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    """Sorts children in place with Compare, then recurses into subgroups."""
    # NOTE: sorted(cmp=...) is Python 2 only.
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
  """Abstract base for objects that can be used as the fileRef property of
  PBXBuildFile."""

  def PathHashables(self):
    """Returns hashables identifying this object by its full path.

    A PBXBuildFile that refers to this object will call this method to
    obtain additional hashables specific to this XCFileLikeElement.  Don't
    just use this object's hashables, they're not specific and unique enough
    on their own (without access to the parent hashables.)  Instead, provide
    hashables that identify this object by path by getting its hashables as
    well as the hashables of ancestor XCHierarchicalElement objects.
    """
    hashables = []
    xche = self
    while xche != None and isinstance(xche, XCHierarchicalElement):
      # Prepend each ancestor's hashables so the outermost ancestor's
      # contribution comes first.  A single list concatenation replaces the
      # original O(n^2) element-by-element insert loop (which also relied on
      # Python 2's xrange) while producing the identical ordering.
      hashables = xche.Hashables() + hashables
      xche = xche.parent
    return hashables
class XCContainerPortal(XCObject):
  """Abstract marker base for objects that can be used as the containerPortal
  property of PBXContainerItemProxy."""
  pass
class XCRemoteObject(XCObject):
  """Abstract marker base for objects that can be used as the
  remoteGlobalIDString property of PBXContainerItemProxy."""
  pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  """A reference to a file (or folder) on disk, as stored in a project.

  A path is required; a name is optional.  When neither lastKnownFileType
  nor explicitFileType is supplied, __init__ infers one from the path's
  extension.
  """

  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType':  [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name':              [0, str, 0, 0],
    'path':              [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the reference, stripping a trailing slash from the path
    and inferring a file-type property from the extension when needed."""
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      # A trailing slash marks a directory ("folder") reference.
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      # TODO(mark): This is the replacement for a replacement for a quick
      # hack.  It is no longer incredibly sucky, but this list needs to be
      # extended.
      # Maps a lowercased filename extension to an Xcode file-type string.
      extension_map = {
        'a':           'archive.ar',
        'app':         'wrapper.application',
        'bdic':        'file',
        'bundle':      'wrapper.cfbundle',
        'c':           'sourcecode.c.c',
        'cc':          'sourcecode.cpp.cpp',
        'cpp':         'sourcecode.cpp.cpp',
        'css':         'text.css',
        'cxx':         'sourcecode.cpp.cpp',
        'dart':        'sourcecode',
        'dylib':       'compiled.mach-o.dylib',
        'framework':   'wrapper.framework',
        'gyp':         'sourcecode',
        'gypi':        'sourcecode',
        'h':           'sourcecode.c.h',
        'hxx':         'sourcecode.cpp.h',
        'icns':        'image.icns',
        'java':        'sourcecode.java',
        'js':          'sourcecode.javascript',
        'm':           'sourcecode.c.objc',
        'mm':          'sourcecode.cpp.objcpp',
        'nib':         'wrapper.nib',
        'o':           'compiled.mach-o.objfile',
        'pdf':         'image.pdf',
        'pl':          'text.script.perl',
        'plist':       'text.plist.xml',
        'pm':          'text.script.perl',
        'png':         'image.png',
        'py':          'text.script.python',
        'r':           'sourcecode.rez',
        'rez':         'sourcecode.rez',
        's':           'sourcecode.asm',
        'storyboard':  'file.storyboard',
        'strings':     'text.plist.strings',
        'swift':       'sourcecode.swift',
        'ttf':         'file',
        'xcassets':    'folder.assetcatalog',
        'xcconfig':    'text.xcconfig',
        'xcdatamodel': 'wrapper.xcdatamodel',
        'xcdatamodeld':'wrapper.xcdatamodeld',
        'xib':         'file.xib',
        'y':           'sourcecode.yacc',
      }

      # Extensions whose type goes into explicitFileType rather than
      # lastKnownFileType.
      prop_map = {
        'dart':        'explicitFileType',
        'gyp':         'explicitFileType',
        'gypi':        'explicitFileType',
      }

      if is_dir:
        file_type = 'folder'
        prop_name = 'lastKnownFileType'
      else:
        basename = posixpath.basename(self._properties['path'])
        (root, ext) = posixpath.splitext(basename)
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and
        # fall back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = extension_map.get(ext, 'text')
        prop_name = prop_map.get(ext, 'lastKnownFileType')

      self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations.

  Children are the per-localization variants of a single file (see
  PBXGroup.AddOrGetFileByPath).  Inheriting XCFileLikeElement lets a variant
  group serve as a PBXBuildFile's fileRef.
  """
  # No additions to the schema relative to PBXGroup.
  pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
  """A named build configuration carrying a buildSettings dictionary and an
  optional base .xcconfig file reference."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings':              [0, dict, 0, 1, {}],
    'name':                       [0, str,  0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns whether |key| is present in this configuration's settings."""
    settings = self._properties['buildSettings']
    return key in settings

  def GetBuildSetting(self, key):
    """Returns the value stored for |key|; raises KeyError when unset."""
    settings = self._properties['buildSettings']
    return settings[key]

  def SetBuildSetting(self, key, value):
    """Stores |value| under |key|, replacing any previous value."""
    # TODO(mark): If a list, copy?
    settings = self._properties['buildSettings']
    settings[key] = value

  def AppendBuildSetting(self, key, value):
    """Appends |value| to the list stored under |key|, creating the list on
    first use."""
    settings = self._properties['buildSettings']
    settings.setdefault(key, []).append(value)

  def DelBuildSetting(self, key):
    """Removes |key| from the settings; a no-op when it is absent."""
    self._properties['buildSettings'].pop(key, None)

  def SetBaseConfiguration(self, value):
    """Points baseConfigurationReference at |value|."""
    self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
  """Returns a descriptive name derived from the owning object's class name
  and display name."""
  owner = self.parent
  return ('Build configuration list for ' + owner.__class__.__name__ +
          ' "' + owner.Name() + '"')
def ConfigurationNamed(self, name):
  """Convenience accessor to obtain an XCBuildConfiguration by name.

  Raises:
    KeyError: no child configuration is named |name|.
  """
  for candidate in self._properties['buildConfigurations']:
    if candidate._properties['name'] != name:
      continue
    return candidate
  raise KeyError(name)
def DefaultConfiguration(self):
  """Convenience accessor to obtain the default XCBuildConfiguration."""
  default_name = self._properties['defaultConfigurationName']
  return self.ConfigurationNamed(default_name)
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError('Variant values for ' + key)
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
  """Ties an XCFileLikeElement to the build phase that consumes it."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef':  [0, XCFileLikeElement, 0, 1],
    'settings': [0, str,               0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    # Example: "main.cc in Sources"
    return '%s in %s' % (self._properties['fileRef'].Name(),
                         self.parent.Name())

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Name() alone would not be unique enough: it is not a complete
    # pathname.  The referenced element's PathHashables make two
    # PBXBuildFiles collide only when the same file is added twice to one
    # target, which would be considered invalid anyway.
    hashables.extend(self._properties['fileRef'].PathHashables())
    return hashables
class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
      to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildActionMask':                    [0, int,          0, 1, 0x7fffffff],
    'files':                              [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int,          0, 1, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    """Standard XCObject setup, then indexes any preexisting 'files'
    children into the two tracking dicts."""
    # super
    XCObject.__init__(self, properties, id, parent)

    self._files_by_path = {}
    self._files_by_xcfilelikeelement = {}
    for pbxbuildfile in self._properties.get('files', []):
      self._AddBuildFileToDicts(pbxbuildfile)

  def FileGroup(self, path):
    """Returns a (PBXGroup, hierarchical) pair for path.  Abstract.

    Subclasses must override this by returning a two-element tuple.  The
    first item in the tuple should be the PBXGroup to which "path" should be
    added, either as a child or deeper descendant.  The second item should
    be a boolean indicating whether files should be added into hierarchical
    groups or one single flat group.
    """
    raise NotImplementedError(
          self.__class__.__name__ + ' must implement FileGroup')

  def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an exception.
    """
    if path in self._files_by_path:
      raise ValueError('Found multiple build files with path ' + path)
    self._files_by_path[path] = pbxbuildfile

  def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
    """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

    If path is specified, then it is the path that is being added to the
    phase, and pbxbuildfile must contain either a PBXFileReference directly
    referencing that path, or it must contain a PBXVariantGroup that itself
    contains a PBXFileReference referencing the path.

    If path is not specified, either the PBXFileReference's path or the paths
    of all children of the PBXVariantGroup are taken as being added to the
    phase.

    If the path is already present in the phase, raises an exception.

    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    are already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappear and are referenced by the
    same PBXBuildFile that has already introduced them, because in the case
    of PBXVariantGroup objects, they may correspond to multiple paths that are
    not all added simultaneously.  When this situation occurs, the path needs
    to be added to _files_by_path, but nothing needs to change in
    _files_by_xcfilelikeelement, and the caller should have avoided adding
    the PBXBuildFile if it is already present in the list of children.
    """

    xcfilelikeelement = pbxbuildfile._properties['fileRef']

    paths = []
    if path != None:
      # It's best when the caller provides the path.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        paths.append(path)
      # NOTE(review): when path is given but fileRef is NOT a
      # PBXVariantGroup, no entry is recorded in _files_by_path; duplicate
      # detection then relies solely on _files_by_xcfilelikeelement below.
      # This looks deliberate — verify against callers before changing.
    else:
      # If the caller didn't provide a path, there can be either multiple
      # paths (PBXVariantGroup) or one.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        for variant in xcfilelikeelement._properties['children']:
          paths.append(variant.FullPath())
      else:
        paths.append(xcfilelikeelement.FullPath())

    # Add the paths first, because if something's going to raise, the
    # messages provided by _AddPathToDict are more useful owing to its
    # having access to a real pathname and not just an object's Name().
    for a_path in paths:
      self._AddPathToDict(pbxbuildfile, a_path)

    # If another PBXBuildFile references this XCFileLikeElement, there's a
    # problem.
    if xcfilelikeelement in self._files_by_xcfilelikeelement and \
       self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
      raise ValueError('Found multiple build files for ' + \
                       xcfilelikeelement.Name())
    self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile

  def AppendBuildFile(self, pbxbuildfile, path=None):
    """Appends pbxbuildfile to the 'files' list and to the tracking dicts.

    Callers should use this instead of calling
    AppendProperty('files', pbxbuildfile) directly because this function
    maintains the object's dicts.  Better yet, callers can just call AddFile
    with a pathname and not worry about building their own PBXBuildFile
    objects.
    """
    self.AppendProperty('files', pbxbuildfile)
    self._AddBuildFileToDicts(pbxbuildfile, path)

  def AddFile(self, path, settings=None):
    """Adds the file at path to this phase, creating a new PBXBuildFile or
    reusing the one already associated with path's PBXVariantGroup.

    settings, when given, becomes the new PBXBuildFile's 'settings'
    property.
    """
    (file_group, hierarchical) = self.FileGroup(path)
    file_ref = file_group.AddOrGetFileByPath(path, hierarchical)

    if file_ref in self._files_by_xcfilelikeelement and \
       isinstance(file_ref, PBXVariantGroup):
      # There's already a PBXBuildFile in this phase corresponding to the
      # PBXVariantGroup.  path just provides a new variant that belongs to
      # the group.  Add the path to the dict.
      pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
      self._AddBuildFileToDicts(pbxbuildfile, path)
    else:
      # Add a new PBXBuildFile to get file_ref into the phase.
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
  """The "Headers" build phase.  Adds nothing to the XCBuildPhase schema."""

  def Name(self):
    return 'Headers'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
  """The "Resources" build phase.  Adds nothing to the XCBuildPhase
  schema."""

  def Name(self):
    return 'Resources'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
  """The "Sources" build phase.  Adds nothing to the XCBuildPhase schema."""

  def Name(self):
    return 'Sources'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
  """The "Frameworks" build phase.  Adds nothing to the XCBuildPhase
  schema."""

  def Name(self):
    return 'Frameworks'

  def FileGroup(self, path):
    # .o files are added to Xcode Frameworks phases, but conceptually aren't
    # frameworks; they're more like sources or intermediates.  Route them to
    # the group an ordinary source file would land in.
    extension = posixpath.splitext(path)[1]
    if extension != '':
      extension = extension[1:].lower()
    if extension == 'o':
      return self.PBXProjectAncestor().RootGroupForPath(path)
    return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
  """A build phase that runs a shell script during the build."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths':       [1, str, 0, 1, []],
    'name':             [0, str, 0, 0],
    'outputPaths':      [1, str, 0, 1, []],
    'shellPath':        [0, str, 0, 1, '/bin/sh'],
    'shellScript':      [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    # A custom name, when set, takes precedence over the generic fallback.
    return self._properties.get('name', 'ShellScript')
class PBXCopyFilesBuildPhase(XCBuildPhase):
  """A build phase that copies its files into a destination directory
  described by dstSubfolderSpec and dstPath."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath':          [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name':             [0, str, 0, 0],
  })

  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile(r'^\$\((.*)\)(/(.*)|)$')

  # Maps known Xcode variable names to the dstSubfolderSpec value used for
  # the corresponding destination in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 10,  # Frameworks
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    # A custom name, when set, takes precedence over the generic fallback.
    return self._properties.get('name', 'CopyFiles')

  def FileGroup(self, path):
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Set the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for
    XCHierarchicalElements, specifically, "$(DIR)/path".
    """
    match = self.path_tree_re.search(path)
    if match:
      # The path is expressed relative to an Xcode variable.
      variable = match.group(1)
      remainder = match.group(3)
      subfolder = self.path_tree_to_subfolder.get(variable)
      if subfolder is not None:
        if remainder is None:
          remainder = ''
      else:
        # The path starts with an unrecognized Xcode variable name like
        # $(SRCROOT).  Xcode will still handle this as an "absolute path"
        # that starts with the variable.
        subfolder = 0
        remainder = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      remainder = path[1:]
    else:
      raise ValueError('Can\'t use path %s in a %s' % \
                       (path, self.__class__.__name__))

    self._properties['dstPath'] = remainder
    self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
  """A custom rule describing how files of a given type or pattern are
  processed by a compiler into outputFiles."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType':     [0, str, 0, 1],
    'isEditable':   [0, int, 0, 1, 1],
    'outputFiles':  [1, str, 0, 1, []],
    'script':       [0, str, 0, 0],
  })

  def Name(self):
    # Not very inspired, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Mix in the properties that make one rule distinct from another.
    hashables.append(self._properties['fileType'])
    if 'filePatterns' in self._properties:
      hashables.append(self._properties['filePatterns'])
    return hashables
class PBXContainerItemProxy(XCObject):
  # When referencing an item in this project file, containerPortal is this
  # file's PBXProject root object.  When referencing an item in another
  # project file, containerPortal is a PBXFileReference identifying that
  # other project file.
  #
  # proxyType is 1 when serving as a proxy to an XCTarget (in this project
  # file or another), and 2 when serving as a proxy to a PBXFileReference
  # (in another project file).  Type 2 is used for references to the
  # products of the other project file's targets.
  #
  # Xcode is weird about remoteGlobalIDString.  Usually, it's printed
  # without a comment, indicating that it's tracked internally simply as a
  # string, but sometimes it's printed with a comment (usually when the
  # object is initially created), indicating that it's tracked as a project
  # file object at least sometimes.  This module always tracks it as an
  # object, but contains a hack to prevent it from printing the comment in
  # the project file output.  See _XCKVPrint.
  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal':      [0, XCContainerPortal, 0, 1],
    'proxyType':            [0, int,               0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject,    0, 1],
    'remoteInfo':           [0, str,               0, 1],
  })

  def __repr__(self):
    portal_name = self._properties['containerPortal'].Name()
    remote_info = self._properties['remoteInfo']
    name = '%s.gyp:%s' % (portal_name, remote_info)
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Fold in the hashables of the weakly-referenced portal and remote
    # object so that distinct proxies hash distinctly.
    hashables.extend(self._properties['containerPortal'].Hashables())
    hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
    return hashables
class PBXTargetDependency(XCObject):
  # The "target" property accepts an XCTarget object, and obviously not
  # NoneType.  But XCTarget is defined below, so it can't be put into the
  # schema yet.  The definition of PBXTargetDependency can't be moved below
  # XCTarget because XCTarget's own schema references PBXTargetDependency.
  # Python doesn't deal well with this circular relationship, and doesn't
  # have a real way to do forward declarations.  To work around, the type of
  # the "target" property is reset below, after XCTarget is defined.
  #
  # At least one of "name" and "target" is required.
  _schema = XCObject._schema.copy()
  _schema.update({
    'name':        [0, str,                   0, 0],
    'target':      [0, None.__class__,        0, 0],
    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
  })

  def __repr__(self):
    name = self._properties.get('name')
    if not name:
      # Fall back on the dependent target's own name.
      name = self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Mix in the hashables of the weakly-referenced proxy object.
    hashables.extend(self._properties['targetProxy'].Hashables())
    return hashables
class PBXReferenceProxy(XCFileLikeElement):
  # An XCFileLikeElement standing in for a file owned by another project
  # file; remoteRef ties it to the real object through a
  # PBXContainerItemProxy.
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'fileType':  [0, str,                   0, 1],
    'path':      [0, str,                   0, 1],
    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
  })
class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject, the XCRemoteObject thing is just
  # to allow PBXProject to be used in the remoteGlobalIDString property of
  # PBXContainerItemProxy.
  #
  # Setting a "name" property at instantiation may also affect "productName",
  # which may in turn affect the "PRODUCT_NAME" build setting in children of
  # "buildConfigurationList".  See __init__ below.
  _schema = XCRemoteObject._schema.copy()
  _schema.update({
    # NOTE(review): a single XCConfigurationList instance serves as the
    # schema default here — presumably the XCObject schema machinery copies
    # defaults per instance; verify in XCObject before relying on it.
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'buildPhases':            [1, XCBuildPhase,        1, 1, []],
    'dependencies':           [1, PBXTargetDependency, 1, 1, []],
    'name':                   [0, str,                 0, 1],
    'productName':            [0, str,                 0, 1],
  })

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target, deriving "productName" and the PRODUCT_NAME
    build setting from "name" when they are not already present.

    force_outdir, force_prefix, and force_extension exist for signature
    compatibility with subclasses (see PBXNativeTarget) and are unused
    here.
    """
    # super
    XCRemoteObject.__init__(self, properties, id, parent)

    # Set up additional defaults not expressed in the schema.  If a "name"
    # property was supplied, set "productName" if it is not present.  Also set
    # the "PRODUCT_NAME" build setting in each configuration, but only if
    # the setting is not present in any build configuration.
    if 'name' in self._properties:
      if not 'productName' in self._properties:
        self.SetProperty('productName', self._properties['name'])

    if 'productName' in self._properties:
      if 'buildConfigurationList' in self._properties:
        configs = self._properties['buildConfigurationList']
        # HasBuildSetting returning 0 means no configuration has the
        # setting; only then is it safe to apply the default everywhere.
        if configs.HasBuildSetting('PRODUCT_NAME') == 0:
          configs.SetBuildSetting('PRODUCT_NAME',
                                  self._properties['productName'])

  def AddDependency(self, other):
    """Adds a dependency on XCTarget other, which may live in this project
    file or in a different one."""
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject == other_pbxproject:
      # Add a dependency to another target in the same project file.
      container = PBXContainerItemProxy({'containerPortal':      pbxproject,
                                         'proxyType':            1,
                                         'remoteGlobalIDString': other,
                                         'remoteInfo':           other.Name()})
      dependency = PBXTargetDependency({'target':      other,
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)
    else:
      # Add a dependency to a target in a different project file.  The
      # containerPortal is the file reference for the other project file,
      # obtained (or created) via AddOrGetProjectReference.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
            'containerPortal':      other_project_ref,
            'proxyType':            1,
            'remoteGlobalIDString': other,
            'remoteInfo':           other.Name(),
          })
      dependency = PBXTargetDependency({'name':        other.Name(),
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)

  # Proxy all of these through to the build configuration list.

  def ConfigurationNamed(self, name):
    return self._properties['buildConfigurationList'].ConfigurationNamed(name)

  def DefaultConfiguration(self):
    return self._properties['buildConfigurationList'].DefaultConfiguration()

  def HasBuildSetting(self, key):
    return self._properties['buildConfigurationList'].HasBuildSetting(key)

  def GetBuildSetting(self, key):
    return self._properties['buildConfigurationList'].GetBuildSetting(key)

  def SetBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].SetBuildSetting(key, \
                                                                      value)

  def AppendBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
                                                                         value)

  def DelBuildSetting(self, key):
    return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property.  See PBXTargetDependency._schema
# above.  (It was declared with NoneType as a placeholder because XCTarget
# did not yet exist when PBXTargetDependency was defined.)
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
  """An XCTarget that produces a product file (app, library, tool, ...).

  The productType drives defaults for the product's file type, name prefix,
  and extension; __init__ synthesizes the productReference accordingly.
  """

  # buildPhases is overridden in the schema to be able to set defaults.
  #
  # NOTE: Contrary to most objects, it is advisable to set parent when
  # constructing PBXNativeTarget.  A parent of an XCTarget must be a
  # PBXProject object.  A parent reference is required for a PBXNativeTarget
  # during construction to be able to set up the target defaults for
  # productReference, because a PBXBuildFile object must be created for the
  # target and it must be added to the PBXProject's mainGroup hierarchy.
  _schema = XCTarget._schema.copy()
  _schema.update({
    'buildPhases':      [1, XCBuildPhase, 1, 1,
                         [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
    'buildRules':       [1, PBXBuildRule,     1, 1, []],
    'productReference': [0, PBXFileReference, 0, 1],
    'productType':      [0, str,              0, 1],
  })

  # Mapping from Xcode product-types to settings.  The settings are:
  #  filetype : used for explicitFileType in the project file
  #  prefix : the prefix for the file name
  #  suffix : the suffix for the file name
  _product_filetypes = {
    'com.apple.product-type.application':          ['wrapper.application',
                                                    '', '.app'],
    'com.apple.product-type.application.watchapp': ['wrapper.application',
                                                    '', '.app'],
    'com.apple.product-type.watchkit-extension':   ['wrapper.app-extension',
                                                    '', '.appex'],
    'com.apple.product-type.app-extension':        ['wrapper.app-extension',
                                                    '', '.appex'],
    'com.apple.product-type.bundle':               ['wrapper.cfbundle',
                                                    '', '.bundle'],
    'com.apple.product-type.framework':            ['wrapper.framework',
                                                    '', '.framework'],
    'com.apple.product-type.library.dynamic':      ['compiled.mach-o.dylib',
                                                    'lib', '.dylib'],
    'com.apple.product-type.library.static':       ['archive.ar',
                                                    'lib', '.a'],
    'com.apple.product-type.tool':                 ['compiled.mach-o.executable',
                                                    '', ''],
    'com.apple.product-type.bundle.unit-test':     ['wrapper.cfbundle',
                                                    '', '.xctest'],
    'com.googlecode.gyp.xcode.bundle':             ['compiled.mach-o.dylib',
                                                    '', '.so'],
  }

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target and synthesizes its productReference.

    When "productName" and a known "productType" are supplied without a
    "productReference", a PBXFileReference for the product is created in
    the ancestor PBXProject's Products group.  force_outdir, force_prefix,
    and force_extension override the output directory, file name prefix,
    and file name extension otherwise derived from the productType.
    """
    # super
    XCTarget.__init__(self, properties, id, parent)

    if 'productName' in self._properties and \
       'productType' in self._properties and \
       'productReference' not in self._properties and \
       self._properties['productType'] in self._product_filetypes:
      products_group = None
      pbxproject = self.PBXProjectAncestor()
      if pbxproject != None:
        products_group = pbxproject.ProductsGroup()

      if products_group != None:
        (filetype, prefix, suffix) = \
            self._product_filetypes[self._properties['productType']]

        # Xcode does not have a distinct type for loadable modules that are
        # pure BSD targets (not in a bundle wrapper).  GYP allows such
        # modules to be specified by setting a target type to
        # loadable_module without having mac_bundle set.  These are mapped
        # to the pseudo-product type com.googlecode.gyp.xcode.bundle.
        #
        # By picking up this special type and converting it to a dynamic
        # library (com.apple.product-type.library.dynamic) with fix-ups,
        # single-file loadable modules can be produced.
        #
        # MACH_O_TYPE is changed to mh_bundle to produce the proper file
        # type (as opposed to mh_dylib).  In order for linking to succeed,
        # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
        # cleared.  They are meaningless for type mh_bundle.
        #
        # Finally, the .so extension is forcibly applied over the default
        # (.dylib), unless another forced extension is already selected.
        # .dylib is plainly wrong, and .bundle is used by loadable_modules
        # in bundle wrappers (com.apple.product-type.bundle).  .so seems an
        # odd choice because it's used as the extension on many other
        # systems that don't distinguish between linkable shared libraries
        # and non-linkable loadable modules, but there's precedent: Python
        # loadable modules on Mac OS X use an .so extension.
        if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
          self._properties['productType'] = \
              'com.apple.product-type.library.dynamic'
          self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
          self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
          self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
          if force_extension is None:
            force_extension = suffix[1:]

        # BUG FIX: this comparison previously used the misspelled constant
        # 'com.apple.product-type-bundle.unit.test', which is not a key of
        # _product_filetypes.  Since this code only runs when productType is
        # one of those keys, the branch was unreachable and unit-test
        # bundles never received their default forced extension.
        if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
          if force_extension is None:
            force_extension = suffix[1:]

        if force_extension is not None:
          # Extension override.  If it's a wrapper (bundle), set
          # WRAPPER_EXTENSION; otherwise EXECUTABLE_EXTENSION.
          suffix = '.' + force_extension
          if filetype.startswith('wrapper.'):
            self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
          else:
            self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)

          if filetype.startswith('compiled.mach-o.executable'):
            # For plain executables the extension becomes part of the
            # product name itself rather than a separate suffix.
            product_name = self._properties['productName']
            product_name += suffix
            suffix = ''
            self.SetProperty('productName', product_name)
            self.SetBuildSetting('PRODUCT_NAME', product_name)

        # Xcode handles most prefixes based on the target type, however
        # there are exceptions.  If a "BSD Dynamic Library" target is added
        # in the Xcode UI, Xcode sets EXECUTABLE_PREFIX.  This check
        # duplicates that behavior.
        if force_prefix is not None:
          prefix = force_prefix
        if filetype.startswith('wrapper.'):
          self.SetBuildSetting('WRAPPER_PREFIX', prefix)
        else:
          self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)

        if force_outdir is not None:
          self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)

        # TODO(tvl): Remove the below hack.
        #    http://code.google.com/p/gyp/issues/detail?id=122
        # Some targets include the prefix in the target_name.  These targets
        # really should just add a product_name setting that doesn't include
        # the prefix.  For example:
        #  target_name = 'libevent', product_name = 'event'
        # This check cleans up for them.
        product_name = self._properties['productName']
        prefix_len = len(prefix)
        if prefix_len and (product_name[:prefix_len] == prefix):
          product_name = product_name[prefix_len:]
          self.SetProperty('productName', product_name)
          self.SetBuildSetting('PRODUCT_NAME', product_name)

        ref_props = {
          'explicitFileType': filetype,
          'includeInIndex':   0,
          'path':             prefix + product_name + suffix,
          'sourceTree':       'BUILT_PRODUCTS_DIR',
        }
        file_ref = PBXFileReference(ref_props)
        products_group.AppendChild(file_ref)
        self.SetProperty('productReference', file_ref)

  def GetBuildPhaseByType(self, type):
    """Returns the sole build phase that is an instance of type, or None.

    Asserts if more than one phase of the requested type is present.
    """
    if 'buildPhases' not in self._properties:
      return None

    the_phase = None
    for phase in self._properties['buildPhases']:
      if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly.  Loop over the entire list of phases and assert if more
        # than one of the desired type is found.
        assert the_phase is None
        the_phase = phase

    return the_phase

  def HeadersPhase(self):
    """Returns the target's PBXHeadersBuildPhase, creating one if needed."""
    headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
    if headers_phase is None:
      headers_phase = PBXHeadersBuildPhase()

      # The headers phase should come before the resources, sources, and
      # frameworks phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index, phase in enumerate(self._properties['buildPhases']):
        if isinstance(phase, (PBXResourcesBuildPhase,
                              PBXSourcesBuildPhase,
                              PBXFrameworksBuildPhase)):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, headers_phase)
      headers_phase.parent = self

    return headers_phase

  def ResourcesPhase(self):
    """Returns the target's PBXResourcesBuildPhase, creating one if
    needed."""
    resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
    if resources_phase is None:
      resources_phase = PBXResourcesBuildPhase()

      # The resources phase should come before the sources and frameworks
      # phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index, phase in enumerate(self._properties['buildPhases']):
        if isinstance(phase, (PBXSourcesBuildPhase,
                              PBXFrameworksBuildPhase)):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, resources_phase)
      resources_phase.parent = self

    return resources_phase

  def SourcesPhase(self):
    """Returns the target's PBXSourcesBuildPhase, creating one if needed."""
    sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
    if sources_phase is None:
      sources_phase = PBXSourcesBuildPhase()
      self.AppendProperty('buildPhases', sources_phase)

    return sources_phase

  def FrameworksPhase(self):
    """Returns the target's PBXFrameworksBuildPhase, creating one if
    needed."""
    frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
    if frameworks_phase is None:
      frameworks_phase = PBXFrameworksBuildPhase()
      self.AppendProperty('buildPhases', frameworks_phase)

    return frameworks_phase

  def AddDependency(self, other):
    """Adds a dependency on other; additionally, when other is a
    PBXNativeTarget producing a linkable product (a static library, or a
    non-mh_bundle dynamic library or framework) and this target is not
    itself a static library, links other's product into this target's
    Frameworks phase."""
    # super
    XCTarget.AddDependency(self, other)

    static_library_type = 'com.apple.product-type.library.static'
    shared_library_type = 'com.apple.product-type.library.dynamic'
    framework_type = 'com.apple.product-type.framework'
    if isinstance(other, PBXNativeTarget) and \
       'productType' in self._properties and \
       self._properties['productType'] != static_library_type and \
       'productType' in other._properties and \
       (other._properties['productType'] == static_library_type or \
        ((other._properties['productType'] == shared_library_type or \
          other._properties['productType'] == framework_type) and \
         ((not other.HasBuildSetting('MACH_O_TYPE')) or
          other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

      file_ref = other.GetProperty('productReference')

      pbxproject = self.PBXProjectAncestor()
      other_pbxproject = other.PBXProjectAncestor()
      if pbxproject != other_pbxproject:
        # The product lives in another project file; link against the
        # proxy for it in this project's reference group.
        other_project_product_group = \
            pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
        file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

      self.FrameworksPhase().AppendProperty('files',
                                            PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
  """An XCTarget with no additions to the XCTarget schema."""
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
  def Path(self):
    """Returns the path to the .xcodeproj file supplied at construction."""
    return self.path
  def Comment(self):
    """Every PBXProject is annotated with this fixed comment string."""
    return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
  def PBXProjectAncestor(self):
    # A PBXProject is its own PBXProject ancestor.
    return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
  def SourceGroup(self):
    """Returns the top-level "Source" group, creating it on first use."""
    return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
  def RootGroupForPath(self, path):
    """Returns a PBXGroup child of this object to which path should be added.

    This method is intended to choose between SourceGroup and
    IntermediatesGroup on the basis of whether path is present in a source
    directory or an intermediates directory.  For the purposes of this
    determination, any path located within a derived file directory such as
    PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
    directory.

    The returned value is a two-element tuple.  The first element is the
    PBXGroup, and the second element specifies whether that group should be
    organized hierarchically (True) or as a single flat list (False).
    """
    # TODO(mark): make this a class variable and bind to self on call?
    # Also, this list is nowhere near exhaustive.
    # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
    # gyp.generator.xcode.  There should probably be some way for that module
    # to push the names in, rather than having to hard-code them here.
    #
    # Maps a source-tree variable name to (group-accessor, hierarchical).
    source_tree_groups = {
      'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
      'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
    }

    (source_tree, path) = SourceTreeAndPathFromPath(path)
    if source_tree != None and source_tree in source_tree_groups:
      (group_func, hierarchical) = source_tree_groups[source_tree]
      group = group_func()
      return (group, hierarchical)

    # TODO(mark): make additional choices based on file extension.

    return (self.SourceGroup(), True)
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
  def SortGroups(self):
    """Sorts the group tree rooted at mainGroup into Xcode's display order.

    Root groups keep their fixed CompareRootGroup order; the 'Products'
    group follows target order; all other groups sort recursively via
    SortGroup.  (Python 2 only: relies on cmp-based sorting.)
    """
    # Sort the children of the mainGroup (like "Source" and "Products")
    # according to their defined order.
    self._properties['mainGroup']._properties['children'] = \
        sorted(self._properties['mainGroup']._properties['children'],
               cmp=lambda x,y: x.CompareRootGroup(y))

    # Sort everything else by putting group before files, and going
    # alphabetically by name within sections of groups and files.  SortGroup
    # is recursive.
    for group in self._properties['mainGroup']._properties['children']:
      if not isinstance(group, PBXGroup):
        continue

      if group.Name() == 'Products':
        # The Products group is a special case.  Instead of sorting
        # alphabetically, sort things in the order of the targets that
        # produce the products.  To do this, just build up a new list of
        # products based on the targets.
        products = []
        for target in self._properties['targets']:
          if not isinstance(target, PBXNativeTarget):
            continue
          product = target._properties['productReference']
          # Make sure that the product is already in the products group.
          assert product in group._properties['children']
          products.append(product)

        # Make sure that this process doesn't miss anything that was already
        # in the products group.
        assert len(products) == len(group._properties['children'])
        group._properties['children'] = products
      else:
        group.SortGroup()
  def AddOrGetProjectReference(self, other_pbxproject):
    """Add a reference to another project file (via PBXProject object) to this
    one.

    Returns [ProductGroup, ProjectRef].  ProductGroup is a PBXGroup object in
    this project file that contains a PBXReferenceProxy object for each
    product of each PBXNativeTarget in the other project file.  ProjectRef is
    a PBXFileReference to the other project file.

    If this project file already references the other project file, the
    existing ProductGroup and ProjectRef are returned.  The ProductGroup will
    still be updated if necessary.
    """
    if not 'projectReferences' in self._properties:
      self._properties['projectReferences'] = []

    product_group = None
    project_ref = None

    if not other_pbxproject in self._other_pbxprojects:
      # This project file isn't yet linked to the other one.  Establish the
      # link.
      product_group = PBXGroup({'name': 'Products'})

      # ProductGroup is strong.
      product_group.parent = self

      # There's nothing unique about this PBXGroup, and if left alone, it will
      # wind up with the same set of hashables as all other PBXGroup objects
      # owned by the projectReferences list.  Add the hashables of the
      # remote PBXProject that it's related to.
      product_group._hashables.extend(other_pbxproject.Hashables())

      # The other project reports its path as relative to the same directory
      # that this project's path is relative to.  The other project's path
      # is not necessarily already relative to this project.  Figure out the
      # pathname that this project needs to use to refer to the other one.
      this_path = posixpath.dirname(self.Path())
      projectDirPath = self.GetProperty('projectDirPath')
      if projectDirPath:
        if posixpath.isabs(projectDirPath[0]):
          this_path = projectDirPath
        else:
          this_path = posixpath.join(this_path, projectDirPath)
      other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)

      # ProjectRef is weak (it's owned by the mainGroup hierarchy).
      project_ref = PBXFileReference({
            'lastKnownFileType': 'wrapper.pb-project',
            'path': other_path,
            'sourceTree': 'SOURCE_ROOT',
          })
      self.ProjectsGroup().AppendChild(project_ref)

      ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
      self._other_pbxprojects[other_pbxproject] = ref_dict
      self.AppendProperty('projectReferences', ref_dict)

      # Xcode seems to sort this list case-insensitively
      self._properties['projectReferences'] = \
          sorted(self._properties['projectReferences'], cmp=lambda x,y:
                 cmp(x['ProjectRef'].Name().lower(),
                     y['ProjectRef'].Name().lower()))
    else:
      # The link already exists.  Pull out the relevant data.
      project_ref_dict = self._other_pbxprojects[other_pbxproject]
      product_group = project_ref_dict['ProductGroup']
      project_ref = project_ref_dict['ProjectRef']

    # Keep the ProductGroup's reference proxies in sync with the remote
    # project's native targets.
    self._SetUpProductReferences(other_pbxproject, product_group, project_ref)

    inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
    targets = other_pbxproject.GetProperty('targets')
    if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
      dir_path = project_ref._properties['path']
      product_group._hashables.extend(dir_path)

    return [product_group, project_ref]
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
# The value of inherit_unique_symroot decides, if a configuration is assumed
# to inherit a unique 'SYMROOT' attribute from its parent, if it doesn't
# define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
for s in self._DefinedSymroots(target):
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
# Returns all values for the 'SYMROOT' attribute defined in all
# configurations for this target. If any configuration doesn't define the
# 'SYMROOT' attribute, None is added to the returned set. If all
# configurations don't define the 'SYMROOT' attribute, an empty set is
# returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
# This method returns True if all configurations in target contain a
# 'SYMROOT' attribute that is unique for the given target. A value is
# unique, if the Xcode macro '$SRCROOT' appears in it in any form.
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
  def _SetUpProductReferences(self, other_pbxproject, product_group,
                              project_ref):
    """Ensures product_group holds a PBXReferenceProxy for every product of
    every PBXNativeTarget in other_pbxproject, proxied through project_ref.
    """
    # TODO(mark): This only adds references to products in other_pbxproject
    # when they don't exist in this pbxproject.  Perhaps it should also
    # remove references from this pbxproject that are no longer present in
    # other_pbxproject.  Perhaps it should update various properties if they
    # change.
    for target in other_pbxproject._properties['targets']:
      if not isinstance(target, PBXNativeTarget):
        continue

      other_fileref = target._properties['productReference']
      if product_group.GetChildByRemoteObject(other_fileref) is None:
        # Xcode sets remoteInfo to the name of the target and not the name
        # of its product, despite this proxy being a reference to the product.
        container_item = PBXContainerItemProxy({
              'containerPortal': project_ref,
              'proxyType': 2,
              'remoteGlobalIDString': other_fileref,
              'remoteInfo': target.Name()
            })
        # TODO(mark): Does sourceTree get copied straight over from the other
        # project?  Can the other project ever have lastKnownFileType here
        # instead of explicitFileType?  (Use it if so?)  Can path ever be
        # unset?  (I don't think so.)  Can other_fileref have name set, and
        # does it impact the PBXReferenceProxy if so?  These are the questions
        # that perhaps will be answered one day.
        reference_proxy = PBXReferenceProxy({
              'fileType': other_fileref._properties['explicitFileType'],
              'path': other_fileref._properties['path'],
              'sourceTree': other_fileref._properties['sourceTree'],
              'remoteRef': container_item,
            })

        product_group.AppendChild(reference_proxy)
  def SortRemoteProductReferences(self):
    """Sorts each remote ProductGroup into the remote project's target order.

    For each remote project file, sort the associated ProductGroup in the
    same order that the targets are sorted in the remote project file.  This
    is the sort order used by Xcode.  (Python 2 only: relies on dict
    iteritems and cmp-based sorting.)
    """
    def CompareProducts(x, y, remote_products):
      # x and y are PBXReferenceProxy objects.  Go through their associated
      # PBXContainerItem to get the remote PBXFileReference, which will be
      # present in the remote_products list.
      x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
      y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
      x_index = remote_products.index(x_remote)
      y_index = remote_products.index(y_remote)

      # Use the order of each remote PBXFileReference in remote_products to
      # determine the sort order.
      return cmp(x_index, y_index)

    for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
      # Build up a list of products in the remote project file, ordered the
      # same as the targets that produce them.
      remote_products = []
      for target in other_pbxproject._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
          continue
        remote_products.append(target._properties['productReference'])

      # Sort the PBXReferenceProxy children according to the list of remote
      # products.
      product_group = ref_dict['ProductGroup']
      product_group._properties['children'] = sorted(
          product_group._properties['children'],
          cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
  """The root of an Xcode project file (the project.pbxproj contents).

  Not a true Xcode object: it has no ID of its own, so ComputeIDs and
  printing delegate to the real root object ('rootObject', a PBXProject).
  """

  _schema = XCObject._schema.copy()
  _schema.update({
    'archiveVersion': [0, int, 0, 1, 1],
    'classes': [0, dict, 0, 1, {}],
    'objectVersion': [0, int, 0, 1, 46],
    'rootObject': [0, PBXProject, 1, 1],
  })

  def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
    # Although XCProjectFile is implemented here as an XCObject, it's not a
    # proper object in the Xcode sense, and it certainly doesn't have its own
    # ID.  Pass through an attempt to update IDs to the real root object.
    if recursive:
      self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)

  def Print(self, file=sys.stdout):
    """Writes the entire project file to file in Xcode's plist format.

    (Python 2 only: relies on dict iteritems and cmp-based sorting.)
    """
    self.VerifyHasRequiredProperties()

    # Add the special "objects" property, which will be caught and handled
    # separately during printing.  This structure allows a fairly standard
    # loop to do the normal printing.
    self._properties['objects'] = {}
    self._XCPrint(file, 0, '// !$*UTF8*$!\n')
    if self._should_print_single_line:
      self._XCPrint(file, 0, '{ ')
    else:
      self._XCPrint(file, 0, '{\n')
    for property, value in sorted(self._properties.iteritems(),
                                  cmp=lambda x, y: cmp(x, y)):
      if property == 'objects':
        self._PrintObjects(file)
      else:
        self._XCKVPrint(file, 1, property, value)
    self._XCPrint(file, 0, '}\n')
    del self._properties['objects']

  def _PrintObjects(self, file):
    """Prints every descendant object, grouped into sections by class name."""
    if self._should_print_single_line:
      self._XCPrint(file, 0, 'objects = {')
    else:
      self._XCPrint(file, 1, 'objects = {\n')

    # Bucket all descendants (excluding self) by class name.
    objects_by_class = {}
    for object in self.Descendants():
      if object == self:
        continue
      class_name = object.__class__.__name__
      if not class_name in objects_by_class:
        objects_by_class[class_name] = []
      objects_by_class[class_name].append(object)

    for class_name in sorted(objects_by_class):
      self._XCPrint(file, 0, '\n')
      self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
      # Sort by ID within a section for deterministic output.
      for object in sorted(objects_by_class[class_name],
                           cmp=lambda x, y: cmp(x.id, y.id)):
        object.Print(file)
      self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')

    if self._should_print_single_line:
      self._XCPrint(file, 0, '}; ')
    else:
      self._XCPrint(file, 1, '};\n')
| gpl-2.0 |
kemalakyol48/python-for-android | python-modules/twisted/twisted/conch/ssh/transport.py | 49 | 52415 | # -*- test-case-name: twisted.conch.test.test_transport -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The lowest level SSH protocol. This handles the key negotiation, the
encryption and the compression. The transport layer is described in
RFC 4253.
Maintainer: Paul Swartz
"""
# base library imports
import struct
import zlib
import array
# external library imports
from Crypto import Util
from Crypto.Cipher import XOR
# twisted imports
from twisted.internet import protocol, defer
from twisted.conch import error
from twisted.python import log, randbytes
from twisted.python.hashlib import md5, sha1
# sibling imports
from twisted.conch.ssh import keys
from twisted.conch.ssh.common import NS, getNS, MP, getMP, _MPpow, ffs
def _getRandomNumber(random, bits):
    """
    Generate a random number in the range [0, 2 ** bits).

    @param random: a callable taking a byte count and returning that many
        random bytes.
    @param bits: The number of bits in the result.
    @type bits: C{int}

    @rtype: C{int} or C{long}
    @return: The newly generated random number.

    @raise ValueError: if C{bits} is not a multiple of 8.
    """
    if bits % 8:
        raise ValueError("bits (%d) must be a multiple of 8" % (bits,))
    # Use floor division so the byte count stays an integer even under
    # "from __future__ import division" or Python 3; identical to '/' on
    # Python 2 ints.
    bytes = random(bits // 8)
    result = Util.number.bytes_to_long(bytes)
    return result
def _generateX(random, bits):
    """
    Generate a new value for the private key x.

    From RFC 2631, section 2.2::

        X9.42 requires that the private key x be in the interval
        [2, (q - 2)].  x should be randomly generated in this interval.
    """
    upperBound = (2 ** bits) - 2
    candidate = _getRandomNumber(random, bits)
    while not (2 <= candidate <= upperBound):
        candidate = _getRandomNumber(random, bits)
    return candidate
class SSHTransportBase(protocol.Protocol):
"""
Protocol supporting basic SSH functionality: sending/receiving packets
and message dispatch. To connect to or run a server, you must use
SSHClientTransport or SSHServerTransport.
@ivar protocolVersion: A string representing the version of the SSH
protocol we support. Currently defaults to '2.0'.
@ivar version: A string representing the version of the server or client.
Currently defaults to 'Twisted'.
@ivar comment: An optional string giving more information about the
server or client.
@ivar supportedCiphers: A list of strings representing the encryption
algorithms supported, in order from most-preferred to least.
@ivar supportedMACs: A list of strings representing the message
authentication codes (hashes) supported, in order from most-preferred
to least. Both this and supportedCiphers can include 'none' to use
no encryption or authentication, but that must be done manually,
@ivar supportedKeyExchanges: A list of strings representing the
key exchanges supported, in order from most-preferred to least.
@ivar supportedPublicKeys: A list of strings representing the
public key types supported, in order from most-preferred to least.
@ivar supportedCompressions: A list of strings representing compression
types supported, from most-preferred to least.
@ivar supportedLanguages: A list of strings representing languages
supported, from most-preferred to least.
@ivar supportedVersions: A container of strings representing supported ssh
protocol version numbers.
@ivar isClient: A boolean indicating whether this is a client or server.
@ivar gotVersion: A boolean indicating whether we have receieved the
version string from the other side.
@ivar buf: Data we've received but hasn't been parsed into a packet.
@ivar outgoingPacketSequence: the sequence number of the next packet we
will send.
@ivar incomingPacketSequence: the sequence number of the next packet we
are expecting from the other side.
@ivar outgoingCompression: an object supporting the .compress(str) and
.flush() methods, or None if there is no outgoing compression. Used to
compress outgoing data.
@ivar outgoingCompressionType: A string representing the outgoing
compression type.
@ivar incomingCompression: an object supporting the .decompress(str)
method, or None if there is no incoming compression. Used to
decompress incoming data.
@ivar incomingCompressionType: A string representing the incoming
compression type.
@ivar ourVersionString: the version string that we sent to the other side.
Used in the key exchange.
@ivar otherVersionString: the version string sent by the other side. Used
in the key exchange.
@ivar ourKexInitPayload: the MSG_KEXINIT payload we sent. Used in the key
exchange.
@ivar otherKexInitPayload: the MSG_KEXINIT payload we received. Used in
the key exchange
@ivar sessionID: a string that is unique to this SSH session. Created as
part of the key exchange, sessionID is used to generate the various
encryption and authentication keys.
@ivar service: an SSHService instance, or None. If it's set to an object,
it's the currently running service.
@ivar kexAlg: the agreed-upon key exchange algorithm.
@ivar keyAlg: the agreed-upon public key type for the key exchange.
@ivar currentEncryptions: an SSHCiphers instance. It represents the
current encryption and authentication options for the transport.
@ivar nextEncryptions: an SSHCiphers instance. Held here until the
MSG_NEWKEYS messages are exchanged, when nextEncryptions is
transitioned to currentEncryptions.
@ivar first: the first bytes of the next packet. In order to avoid
decrypting data twice, the first bytes are decrypted and stored until
the whole packet is available.
"""
protocolVersion = '2.0'
version = 'Twisted'
comment = ''
ourVersionString = ('SSH-' + protocolVersion + '-' + version + ' '
+ comment).strip()
supportedCiphers = ['aes256-ctr', 'aes256-cbc', 'aes192-ctr', 'aes192-cbc',
'aes128-ctr', 'aes128-cbc', 'cast128-ctr',
'cast128-cbc', 'blowfish-ctr', 'blowfish-cbc',
'3des-ctr', '3des-cbc'] # ,'none']
supportedMACs = ['hmac-sha1', 'hmac-md5'] # , 'none']
# both of the above support 'none', but for security are disabled by
# default. to enable them, subclass this class and add it, or do:
# SSHTransportBase.supportedCiphers.append('none')
supportedKeyExchanges = ['diffie-hellman-group-exchange-sha1',
'diffie-hellman-group1-sha1']
supportedPublicKeys = ['ssh-rsa', 'ssh-dss']
supportedCompressions = ['none', 'zlib']
supportedLanguages = ()
supportedVersions = ('1.99', '2.0')
isClient = False
gotVersion = False
buf = ''
outgoingPacketSequence = 0
incomingPacketSequence = 0
outgoingCompression = None
incomingCompression = None
sessionID = None
service = None
def connectionLost(self, reason):
if self.service:
self.service.serviceStopped()
if hasattr(self, 'avatar'):
self.logoutFunction()
log.msg('connection lost')
def connectionMade(self):
"""
Called when the connection is made to the other side. We sent our
version and the MSG_KEXINIT packet.
"""
self.transport.write('%s\r\n' % (self.ourVersionString,))
self.currentEncryptions = SSHCiphers('none', 'none', 'none', 'none')
self.currentEncryptions.setKeys('', '', '', '', '', '')
self.sendKexInit()
    def sendKexInit(self):
        """
        Send a MSG_KEXINIT message advertising our supported algorithms.

        The full payload (including the leading message-type byte) is kept
        in ourKexInitPayload because those exact bytes are needed later
        during key exchange.
        """
        self.ourKexInitPayload = (chr(MSG_KEXINIT) +
               randbytes.secureRandom(16) +    # the 16-byte random cookie
               NS(','.join(self.supportedKeyExchanges)) +
               NS(','.join(self.supportedPublicKeys)) +
               # Each remaining list is sent twice: once for the
               # client-to-server direction, once for server-to-client; we
               # offer the same algorithms both ways.
               NS(','.join(self.supportedCiphers)) +
               NS(','.join(self.supportedCiphers)) +
               NS(','.join(self.supportedMACs)) +
               NS(','.join(self.supportedMACs)) +
               NS(','.join(self.supportedCompressions)) +
               NS(','.join(self.supportedCompressions)) +
               NS(','.join(self.supportedLanguages)) +
               NS(','.join(self.supportedLanguages)) +
               '\000' + '\000\000\000\000')    # no packet follows; reserved
        self.sendPacket(MSG_KEXINIT, self.ourKexInitPayload[1:])
    def sendPacket(self, messageType, payload):
        """
        Sends a packet.  If it's been set up, compress the data, encrypt it,
        and authenticate it before sending.

        @param messageType: The type of the packet; generally one of the
                            MSG_* values.
        @type messageType: C{int}
        @param payload: The payload for the message.
        @type payload: C{str}
        """
        payload = chr(messageType) + payload
        if self.outgoingCompression:
            payload = (self.outgoingCompression.compress(payload)
                       + self.outgoingCompression.flush(2))
        bs = self.currentEncryptions.encBlockSize
        # 4 for the packet length and 1 for the padding length
        totalSize = 5 + len(payload)
        # Pad up to a whole cipher block, with a minimum of 4 padding bytes.
        lenPad = bs - (totalSize % bs)
        if lenPad < 4:
            lenPad = lenPad + bs
        packet = (struct.pack('!LB',
                              totalSize + lenPad - 4, lenPad) +
                  payload + randbytes.secureRandom(lenPad))
        # The MAC is computed over the sequence number plus the plaintext
        # packet, then appended to the ciphertext.
        encPacket = (
            self.currentEncryptions.encrypt(packet) +
            self.currentEncryptions.makeMAC(
                self.outgoingPacketSequence, packet))
        self.transport.write(encPacket)
        self.outgoingPacketSequence += 1
    def getPacket(self):
        """
        Try to return a decrypted, authenticated, and decompressed packet
        out of the buffer.  If there is not enough data, return None.

        @rtype: C{str}/C{None}
        """
        bs = self.currentEncryptions.decBlockSize
        ms = self.currentEncryptions.verifyDigestSize
        if len(self.buf) < bs: return # not enough data
        if not hasattr(self, 'first'):
            first = self.currentEncryptions.decrypt(self.buf[:bs])
        else:
            # The first block was already decrypted by an earlier call that
            # returned for lack of data; reuse it rather than decrypting the
            # same ciphertext twice.
            first = self.first
            del self.first
        packetLen, paddingLen = struct.unpack('!LB', first[:5])
        if packetLen > 1048576: # 1024 ** 2
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR,
                                'bad packet length %s' % packetLen)
            return
        if len(self.buf) < packetLen + 4 + ms:
            # Not all of the packet has arrived; stash the decrypted first
            # block for the next call.
            self.first = first
            return # not enough packet
        if(packetLen + 4) % bs != 0:
            self.sendDisconnect(
                DISCONNECT_PROTOCOL_ERROR,
                'bad packet mod (%i%%%i == %i)' % (packetLen + 4, bs,
                                                   (packetLen + 4) % bs))
            return
        encData, self.buf = self.buf[:4 + packetLen], self.buf[4 + packetLen:]
        packet = first + self.currentEncryptions.decrypt(encData[bs:])
        if len(packet) != 4 + packetLen:
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR,
                                'bad decryption')
            return
        if ms:
            # Verify the MAC over the sequence number plus the plaintext.
            macData, self.buf = self.buf[:ms], self.buf[ms:]
            if not self.currentEncryptions.verify(self.incomingPacketSequence,
                                                  packet, macData):
                self.sendDisconnect(DISCONNECT_MAC_ERROR, 'bad MAC')
                return
        # Strip the 5-byte header and the trailing random padding.
        payload = packet[5:-paddingLen]
        if self.incomingCompression:
            try:
                payload = self.incomingCompression.decompress(payload)
            except: # bare except, because who knows what kind of errors
                    # decompression can raise
                log.err()
                self.sendDisconnect(DISCONNECT_COMPRESSION_ERROR,
                                    'compression error')
                return
        self.incomingPacketSequence += 1
        return payload
def _unsupportedVersionReceived(self, remoteVersion):
"""
Called when an unsupported version of the ssh protocol is received from
the remote endpoint.
@param remoteVersion: remote ssh protocol version which is unsupported
by us.
@type remoteVersion: C{str}
"""
self.sendDisconnect(DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED,
'bad version ' + remoteVersion)
    def dataReceived(self, data):
        """
        First, check for the version string (SSH-2.0-*).  After that has been
        received, this method adds data to the buffer, and pulls out any
        packets.

        @type data: C{str}
        """
        self.buf = self.buf + data
        if not self.gotVersion:
            # Wait until a complete line containing the version banner has
            # arrived before attempting to parse it.
            if self.buf.find('\n', self.buf.find('SSH-')) == -1:
                return
            lines = self.buf.split('\n')
            for p in lines:
                if p.startswith('SSH-'):
                    self.gotVersion = True
                    self.otherVersionString = p.strip()
                    # 'SSH-<version>-<software>' -> '<version>'
                    remoteVersion = p.split('-')[1]
                    if remoteVersion not in self.supportedVersions:
                        self._unsupportedVersionReceived(remoteVersion)
                        return
                    # Everything after the banner line is packet data.
                    i = lines.index(p)
                    self.buf = '\n'.join(lines[i + 1:])
        packet = self.getPacket()
        while packet:
            messageNum = ord(packet[0])
            self.dispatchMessage(messageNum, packet[1:])
            packet = self.getPacket()
    def dispatchMessage(self, messageNum, payload):
        """
        Send a received message to the appropriate method.

        Transport-level messages (numbers below 50) are dispatched to the
        matching ssh_* method on this object; anything else goes to the
        running service.  Messages nobody handles are answered with
        MSG_UNIMPLEMENTED.

        @type messageNum: C{int}
        @type payload: c{str}
        """
        if messageNum < 50 and messageNum in messages:
            # messages maps number -> 'MSG_<NAME>'; strip the MSG_ prefix.
            messageType = messages[messageNum][4:]
            f = getattr(self, 'ssh_%s' % messageType, None)
            if f is not None:
                f(payload)
            else:
                log.msg("couldn't handle %s" % messageType)
                log.msg(repr(payload))
                self.sendUnimplemented()
        elif self.service:
            log.callWithLogger(self.service, self.service.packetReceived,
                               messageNum, payload)
        else:
            log.msg("couldn't handle %s" % messageNum)
            log.msg(repr(payload))
            self.sendUnimplemented()
def ssh_KEXINIT(self, packet):
"""
Called when we receive a MSG_KEXINIT message. Payload::
bytes[16] cookie
string keyExchangeAlgorithms
string keyAlgorithms
string incomingEncryptions
string outgoingEncryptions
string incomingAuthentications
string outgoingAuthentications
string incomingCompressions
string outgoingCompressions
string incomingLanguages
string outgoingLanguages
bool firstPacketFollows
unit32 0 (reserved)
Starts setting up the key exchange, keys, encryptions, and
authentications. Extended by ssh_KEXINIT in SSHServerTransport and
SSHClientTransport.
"""
self.otherKexInitPayload = chr(MSG_KEXINIT) + packet
#cookie = packet[: 16] # taking this is useless
k = getNS(packet[16:], 10)
strings, rest = k[:-1], k[-1]
(kexAlgs, keyAlgs, encCS, encSC, macCS, macSC, compCS, compSC, langCS,
langSC) = [s.split(',') for s in strings]
# these are the server directions
outs = [encSC, macSC, compSC]
ins = [encCS, macSC, compCS]
if self.isClient:
outs, ins = ins, outs # switch directions
server = (self.supportedKeyExchanges, self.supportedPublicKeys,
self.supportedCiphers, self.supportedCiphers,
self.supportedMACs, self.supportedMACs,
self.supportedCompressions, self.supportedCompressions)
client = (kexAlgs, keyAlgs, outs[0], ins[0], outs[1], ins[1],
outs[2], ins[2])
if self.isClient:
server, client = client, server
self.kexAlg = ffs(client[0], server[0])
self.keyAlg = ffs(client[1], server[1])
self.nextEncryptions = SSHCiphers(
ffs(client[2], server[2]),
ffs(client[3], server[3]),
ffs(client[4], server[4]),
ffs(client[5], server[5]))
self.outgoingCompressionType = ffs(client[6], server[6])
self.incomingCompressionType = ffs(client[7], server[7])
if None in (self.kexAlg, self.keyAlg, self.outgoingCompressionType,
self.incomingCompressionType):
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
"couldn't match all kex parts")
return
if None in self.nextEncryptions.__dict__.values():
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
"couldn't match all kex parts")
return
log.msg('kex alg, key alg: %s %s' % (self.kexAlg, self.keyAlg))
log.msg('outgoing: %s %s %s' % (self.nextEncryptions.outCipType,
self.nextEncryptions.outMACType,
self.outgoingCompressionType))
log.msg('incoming: %s %s %s' % (self.nextEncryptions.inCipType,
self.nextEncryptions.inMACType,
self.incomingCompressionType))
return kexAlgs, keyAlgs, rest # for SSHServerTransport to use
def ssh_DISCONNECT(self, packet):
"""
Called when we receive a MSG_DISCONNECT message. Payload::
long code
string description
This means that the other side has disconnected. Pass the message up
and disconnect ourselves.
"""
reasonCode = struct.unpack('>L', packet[: 4])[0]
description, foo = getNS(packet[4:])
self.receiveError(reasonCode, description)
self.transport.loseConnection()
def ssh_IGNORE(self, packet):
"""
Called when we receieve a MSG_IGNORE message. No payload.
This means nothing; we simply return.
"""
def ssh_UNIMPLEMENTED(self, packet):
"""
Called when we receieve a MSG_UNIMPLEMENTED message. Payload::
long packet
This means that the other side did not implement one of our packets.
"""
seqnum, = struct.unpack('>L', packet)
self.receiveUnimplemented(seqnum)
def ssh_DEBUG(self, packet):
"""
Called when we receieve a MSG_DEBUG message. Payload::
bool alwaysDisplay
string message
string language
This means the other side has passed along some debugging info.
"""
alwaysDisplay = bool(packet[0])
message, lang, foo = getNS(packet[1:], 2)
self.receiveDebug(alwaysDisplay, message, lang)
def setService(self, service):
"""
Set our service to service and start it running. If we were
running a service previously, stop it first.
@type service: C{SSHService}
"""
log.msg('starting service %s' % service.name)
if self.service:
self.service.serviceStopped()
self.service = service
service.transport = self
self.service.serviceStarted()
def sendDebug(self, message, alwaysDisplay=False, language=''):
"""
Send a debug message to the other side.
@param message: the message to send.
@type message: C{str}
@param alwaysDisplay: if True, tell the other side to always
display this message.
@type alwaysDisplay: C{bool}
@param language: optionally, the language the message is in.
@type language: C{str}
"""
self.sendPacket(MSG_DEBUG, chr(alwaysDisplay) + NS(message) +
NS(language))
def sendIgnore(self, message):
"""
Send a message that will be ignored by the other side. This is
useful to fool attacks based on guessing packet sizes in the
encrypted stream.
@param message: data to send with the message
@type message: C{str}
"""
self.sendPacket(MSG_IGNORE, NS(message))
def sendUnimplemented(self):
"""
Send a message to the other side that the last packet was not
understood.
"""
seqnum = self.incomingPacketSequence
self.sendPacket(MSG_UNIMPLEMENTED, struct.pack('!L', seqnum))
def sendDisconnect(self, reason, desc):
"""
Send a disconnect message to the other side and then disconnect.
@param reason: the reason for the disconnect. Should be one of the
DISCONNECT_* values.
@type reason: C{int}
@param desc: a descrption of the reason for the disconnection.
@type desc: C{str}
"""
self.sendPacket(
MSG_DISCONNECT, struct.pack('>L', reason) + NS(desc) + NS(''))
log.msg('Disconnecting with error, code %s\nreason: %s' % (reason,
desc))
self.transport.loseConnection()
def _getKey(self, c, sharedSecret, exchangeHash):
"""
Get one of the keys for authentication/encryption.
@type c: C{str}
@type sharedSecret: C{str}
@type exchangeHash: C{str}
"""
k1 = sha1(sharedSecret + exchangeHash + c + self.sessionID)
k1 = k1.digest()
k2 = sha1(sharedSecret + exchangeHash + k1).digest()
return k1 + k2
    def _keySetup(self, sharedSecret, exchangeHash):
        """
        Set up the keys for the connection and sends MSG_NEWKEYS when
        finished,

        @param sharedSecret: a secret string agreed upon using a Diffie-
                             Hellman exchange, so it is only shared between
                             the server and the client.
        @type sharedSecret: C{str}
        @param exchangeHash: A hash of various data known by both sides.
        @type exchangeHash: C{str}
        """
        # The first exchange hash becomes the permanent session ID.
        if not self.sessionID:
            self.sessionID = exchangeHash
        # Letters 'A'-'F' select the six derived keys: IVs, cipher keys and
        # integrity keys for each direction (client-to-server first).
        initIVCS = self._getKey('A', sharedSecret, exchangeHash)
        initIVSC = self._getKey('B', sharedSecret, exchangeHash)
        encKeyCS = self._getKey('C', sharedSecret, exchangeHash)
        encKeySC = self._getKey('D', sharedSecret, exchangeHash)
        integKeyCS = self._getKey('E', sharedSecret, exchangeHash)
        integKeySC = self._getKey('F', sharedSecret, exchangeHash)
        outs = [initIVSC, encKeySC, integKeySC]
        ins = [initIVCS, encKeyCS, integKeyCS]
        if self.isClient: # reverse for the client
            log.msg('REVERSE')
            outs, ins = ins, outs
        self.nextEncryptions.setKeys(outs[0], outs[1], ins[0], ins[1],
                                     outs[2], ins[2])
        self.sendPacket(MSG_NEWKEYS, '')
def isEncrypted(self, direction="out"):
    """
    Report whether the connection is encrypted in the given direction.

    @param direction: one of "out", "in" or "both".
    @raise TypeError: if C{direction} is none of the accepted values.
    """
    if direction == "both":
        return self.isEncrypted("in") and self.isEncrypted("out")
    if direction == "out":
        return self.currentEncryptions.outCipType != 'none'
    if direction == "in":
        return self.currentEncryptions.inCipType != 'none'
    raise TypeError('direction must be "out", "in", or "both"')
def isVerified(self, direction="out"):
    """
    Report whether the connection is verified/authenticated (i.e. a MAC
    is in use) in the given direction.

    @param direction: one of "out", "in" or "both".
    @raise TypeError: if C{direction} is none of the accepted values.
    """
    if direction == "both":
        return self.isVerified("in") and self.isVerified("out")
    if direction == "out":
        return self.currentEncryptions.outMACType != 'none'
    if direction == "in":
        return self.currentEncryptions.inMACType != 'none'
    raise TypeError('direction must be "out", "in", or "both"')
def loseConnection(self):
    """
    Lose the connection to the other side, sending a
    DISCONNECT_CONNECTION_LOST message.
    """
    # sendDisconnect also closes the underlying transport after queueing
    # the disconnect packet.
    self.sendDisconnect(DISCONNECT_CONNECTION_LOST,
                        "user closed connection")
# client methods
def receiveError(self, reasonCode, description):
    """
    Called when we receive a disconnect error message from the other
    side.

    The default implementation only logs the error; subclasses may
    override to react to specific reason codes.

    @param reasonCode: the reason for the disconnect, one of the
                       DISCONNECT_ values.
    @type reasonCode: C{int}
    @param description: a human-readable description of the
                        disconnection.
    @type description: C{str}
    """
    log.msg('Got remote error, code %s\nreason: %s' % (reasonCode,
                                                       description))
def receiveUnimplemented(self, seqnum):
    """
    Called when we receive an unimplemented packet message from the other
    side.

    The default implementation only logs the event.

    @param seqnum: the sequence number that was not understood.
    @type seqnum: C{int}
    """
    log.msg('other side unimplemented packet #%s' % seqnum)
def receiveDebug(self, alwaysDisplay, message, lang):
    """
    Called when we receive a debug message from the other side.

    @param alwaysDisplay: if True, this message should always be
                          displayed.
    @type alwaysDisplay: C{bool}
    @param message: the debug message
    @type message: C{str}
    @param lang: optionally the language the message is in.
    @type lang: C{str}
    """
    if not alwaysDisplay:
        return
    log.msg('Remote Debug Message: %s' % message)
class SSHServerTransport(SSHTransportBase):
    """
    SSHServerTransport implements the server side of the SSH protocol.

    @ivar isClient: since we are never the client, this is always False.
    @ivar ignoreNextPacket: if True, ignore the next key exchange packet.  This
        is set when the client sends a guessed key exchange packet but with
        an incorrect guess.
    @ivar dhGexRequest: the KEX_DH_GEX_REQUEST(_OLD) that the client sent.
        The key generation needs this to be stored.
    @ivar g: the Diffie-Hellman group generator.
    @ivar p: the Diffie-Hellman group prime.
    """
    isClient = False
    ignoreNextPacket = 0

    def ssh_KEXINIT(self, packet):
        """
        Called when we receive a MSG_KEXINIT message.  For a description
        of the packet, see SSHTransportBase.ssh_KEXINIT().  Additionally,
        this method checks if a guessed key exchange packet was sent.  If
        it was sent, and it guessed incorrectly, the next key exchange
        packet MUST be ignored.
        """
        retval = SSHTransportBase.ssh_KEXINIT(self, packet)
        if not retval: # disconnected
            return
        else:
            kexAlgs, keyAlgs, rest = retval
        # The byte following the negotiation lists is the
        # first_kex_packet_follows flag.
        if ord(rest[0]): # first_kex_packet_follows
            # A guessed packet is only usable if the client guessed both
            # our preferred kex algorithm and our preferred host key
            # algorithm; otherwise the packet that follows must be dropped.
            if (kexAlgs[0] != self.supportedKeyExchanges[0] or
                keyAlgs[0] != self.supportedPublicKeys[0]):
                self.ignoreNextPacket = True # guess was wrong

    def ssh_KEX_DH_GEX_REQUEST_OLD(self, packet):
        """
        This represents two different key exchange methods that share the
        same integer value.

        KEXDH_INIT (for diffie-hellman-group1-sha1 exchanges) payload::

            integer e (the client's Diffie-Hellman public key)

        We send the KEXDH_REPLY with our host key and signature.

        KEX_DH_GEX_REQUEST_OLD (for diffie-hellman-group-exchange-sha1)
        payload::

            integer ideal (ideal size for the Diffie-Hellman prime)

        We send the KEX_DH_GEX_GROUP message with the group that is
        closest in size to ideal.

        If we were told to ignore the next key exchange packet by
        ssh_KEXINIT, drop it on the floor and return.
        """
        if self.ignoreNextPacket:
            self.ignoreNextPacket = 0
            return
        if self.kexAlg == 'diffie-hellman-group1-sha1':
            # this is really KEXDH_INIT
            clientDHpublicKey, foo = getMP(packet)
            y = _getRandomNumber(randbytes.secureRandom, 512)
            # _MPpow returns the result already encoded as an SSH MP
            # string, so it is fed to the hash and to sendPacket as-is.
            serverDHpublicKey = _MPpow(DH_GENERATOR, y, DH_PRIME)
            sharedSecret = _MPpow(clientDHpublicKey, y, DH_PRIME)
            # Exchange hash: the exact field order below is
            # protocol-mandated; do not reorder.
            h = sha1()
            h.update(NS(self.otherVersionString))
            h.update(NS(self.ourVersionString))
            h.update(NS(self.otherKexInitPayload))
            h.update(NS(self.ourKexInitPayload))
            h.update(NS(self.factory.publicKeys[self.keyAlg].blob()))
            h.update(MP(clientDHpublicKey))
            h.update(serverDHpublicKey)
            h.update(sharedSecret)
            exchangeHash = h.digest()
            self.sendPacket(
                MSG_KEXDH_REPLY,
                NS(self.factory.publicKeys[self.keyAlg].blob()) +
                serverDHpublicKey +
                NS(self.factory.privateKeys[self.keyAlg].sign(exchangeHash)))
            self._keySetup(sharedSecret, exchangeHash)
        elif self.kexAlg == 'diffie-hellman-group-exchange-sha1':
            # Stored so ssh_KEX_DH_GEX_INIT can include the request bytes
            # in the exchange hash.
            self.dhGexRequest = packet
            ideal = struct.unpack('>L', packet)[0]
            self.g, self.p = self.factory.getDHPrime(ideal)
            self.sendPacket(MSG_KEX_DH_GEX_GROUP, MP(self.p) + MP(self.g))
        else:
            raise error.ConchError('bad kexalg: %s' % self.kexAlg)

    def ssh_KEX_DH_GEX_REQUEST(self, packet):
        """
        Called when we receive a MSG_KEX_DH_GEX_REQUEST message.  Payload::
            integer minimum
            integer ideal
            integer maximum

        The client is asking for a Diffie-Hellman group between minimum and
        maximum size, and close to ideal if possible.  We reply with a
        MSG_KEX_DH_GEX_GROUP message.

        If we were told to ignore the next key exchange packet by
        ssh_KEXINIT, drop it on the floor and return.
        """
        if self.ignoreNextPacket:
            self.ignoreNextPacket = 0
            return
        self.dhGexRequest = packet
        # NOTE(review): 'min' and 'max' shadow the builtins; only 'ideal'
        # is actually used (the min/max bounds are ignored here).
        min, ideal, max = struct.unpack('>3L', packet)
        self.g, self.p = self.factory.getDHPrime(ideal)
        self.sendPacket(MSG_KEX_DH_GEX_GROUP, MP(self.p) + MP(self.g))

    def ssh_KEX_DH_GEX_INIT(self, packet):
        """
        Called when we get a MSG_KEX_DH_GEX_INIT message.  Payload::
            integer e (client DH public key)

        We send the MSG_KEX_DH_GEX_REPLY message with our host key and
        signature.
        """
        clientDHpublicKey, foo = getMP(packet)
        # TODO: we should also look at the value they send to us and reject
        # insecure values of f (if g==2 and f has a single '1' bit while the
        # rest are '0's, then they must have used a small y also).

        # TODO: This could be computed when self.p is set up
        #  or do as openssh does and scan f for a single '1' bit instead
        pSize = Util.number.size(self.p)
        y = _getRandomNumber(randbytes.secureRandom, pSize)
        # serverDHpublicKey and sharedSecret are MP-encoded strings.
        serverDHpublicKey = _MPpow(self.g, y, self.p)
        sharedSecret = _MPpow(clientDHpublicKey, y, self.p)
        # Exchange hash for group exchange additionally covers the
        # original GEX request bytes and the negotiated group (p, g).
        h = sha1()
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(self.factory.publicKeys[self.keyAlg].blob()))
        h.update(self.dhGexRequest)
        h.update(MP(self.p))
        h.update(MP(self.g))
        h.update(MP(clientDHpublicKey))
        h.update(serverDHpublicKey)
        h.update(sharedSecret)
        exchangeHash = h.digest()
        self.sendPacket(
            MSG_KEX_DH_GEX_REPLY,
            NS(self.factory.publicKeys[self.keyAlg].blob()) +
            serverDHpublicKey +
            NS(self.factory.privateKeys[self.keyAlg].sign(exchangeHash)))
        self._keySetup(sharedSecret, exchangeHash)

    def ssh_NEWKEYS(self, packet):
        """
        Called when we get a MSG_NEWKEYS message.  No payload.
        When we get this, the keys have been set on both sides, and we
        start using them to encrypt and authenticate the connection.
        """
        log.msg('NEW KEYS')
        if packet != '':
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR,
                                "NEWKEYS takes no data")
            return
        # Switch to the ciphers/MACs negotiated during key exchange.
        self.currentEncryptions = self.nextEncryptions
        if self.outgoingCompressionType == 'zlib':
            self.outgoingCompression = zlib.compressobj(6)
        if self.incomingCompressionType == 'zlib':
            self.incomingCompression = zlib.decompressobj()

    def ssh_SERVICE_REQUEST(self, packet):
        """
        Called when we get a MSG_SERVICE_REQUEST message.  Payload::
            string serviceName

        The client has requested a service.  If we can start the service,
        start it; otherwise, disconnect with
        DISCONNECT_SERVICE_NOT_AVAILABLE.
        """
        service, rest = getNS(packet)
        cls = self.factory.getService(self, service)
        if not cls:
            self.sendDisconnect(DISCONNECT_SERVICE_NOT_AVAILABLE,
                                "don't have service %s" % service)
            return
        else:
            self.sendPacket(MSG_SERVICE_ACCEPT, NS(service))
            self.setService(cls())
class SSHClientTransport(SSHTransportBase):
    """
    SSHClientTransport implements the client side of the SSH protocol.

    @ivar isClient: since we are always the client, this is always True.
    @ivar _gotNewKeys: if we receive a MSG_NEWKEYS message before we are
        ready to transition to the new keys, this is set to True so we
        can transition when the keys are ready locally.
    @ivar x: our Diffie-Hellman private key.
    @ivar e: our Diffie-Hellman public key.
    @ivar g: the Diffie-Hellman group generator.
    @ivar p: the Diffie-Hellman group prime
    @ivar instance: the SSHService object we are requesting.
    """
    isClient = True

    def connectionMade(self):
        """
        Called when the connection is started with the server.  Just sets
        up a private instance variable.
        """
        SSHTransportBase.connectionMade(self)
        self._gotNewKeys = 0

    def ssh_KEXINIT(self, packet):
        """
        Called when we receive a MSG_KEXINIT message.  For a description
        of the packet, see SSHTransportBase.ssh_KEXINIT().  Additionally,
        this method sends the first key exchange packet.  If the agreed-upon
        exchange is diffie-hellman-group1-sha1, generate a public key
        and send it in a MSG_KEXDH_INIT message.  If the exchange is
        diffie-hellman-group-exchange-sha1, ask for a 2048 bit group with a
        MSG_KEX_DH_GEX_REQUEST_OLD message.
        """
        if SSHTransportBase.ssh_KEXINIT(self, packet) is None:
            return # we disconnected
        if self.kexAlg == 'diffie-hellman-group1-sha1':
            self.x = _generateX(randbytes.secureRandom, 512)
            # self.e is already an MP-encoded string.
            self.e = _MPpow(DH_GENERATOR, self.x, DH_PRIME)
            self.sendPacket(MSG_KEXDH_INIT, self.e)
        elif self.kexAlg == 'diffie-hellman-group-exchange-sha1':
            # '\x00\x00\x08\x00' is the uint32 2048: the ideal group size.
            self.sendPacket(MSG_KEX_DH_GEX_REQUEST_OLD, '\x00\x00\x08\x00')
        else:
            raise error.ConchError("somehow, the kexAlg has been set "
                                   "to something we don't support")

    def ssh_KEX_DH_GEX_GROUP(self, packet):
        """
        This handles two different message which share an integer value.

        If the key exchange is diffie-hellman-group1-sha1, this is
        MSG_KEXDH_REPLY.  Payload::
            string serverHostKey
            integer f (server Diffie-Hellman public key)
            string signature

        We verify the host key by calling verifyHostKey, then continue in
        _continueKEXDH_REPLY.

        If the key exchange is diffie-hellman-group-exchange-sha1, this is
        MSG_KEX_DH_GEX_GROUP.  Payload::
            string g (group generator)
            string p (group prime)

        We generate a Diffie-Hellman public key and send it in a
        MSG_KEX_DH_GEX_INIT message.
        """
        if self.kexAlg == 'diffie-hellman-group1-sha1':
            # actually MSG_KEXDH_REPLY
            pubKey, packet = getNS(packet)
            f, packet = getMP(packet)
            signature, packet = getNS(packet)
            fingerprint = ':'.join([ch.encode('hex') for ch in
                                    md5(pubKey).digest()])
            # verifyHostKey returns a Deferred; key setup resumes in
            # _continueKEXDH_REPLY once the key is accepted.
            d = self.verifyHostKey(pubKey, fingerprint)
            d.addCallback(self._continueKEXDH_REPLY, pubKey, f, signature)
            d.addErrback(
                lambda unused: self.sendDisconnect(
                    DISCONNECT_HOST_KEY_NOT_VERIFIABLE, 'bad host key'))
            return d
        else:
            self.p, rest = getMP(packet)
            self.g, rest = getMP(rest)
            self.x = _generateX(randbytes.secureRandom, 320)
            self.e = _MPpow(self.g, self.x, self.p)
            self.sendPacket(MSG_KEX_DH_GEX_INIT, self.e)

    def _continueKEXDH_REPLY(self, ignored, pubKey, f, signature):
        """
        The host key has been verified, so we generate the keys.

        @param pubKey: the public key blob for the server's public key.
        @type pubKey: C{str}
        @param f: the server's Diffie-Hellman public key.
        @type f: C{long}
        @param signature: the server's signature, verifying that it has the
            correct private key.
        @type signature: C{str}
        """
        serverKey = keys.Key.fromString(pubKey)
        sharedSecret = _MPpow(f, self.x, DH_PRIME)
        # Recompute the exchange hash locally; field order is
        # protocol-mandated (client versions/payloads first on our side).
        h = sha1()
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(pubKey))
        # self.e is already MP-encoded; f still needs wrapping.
        h.update(self.e)
        h.update(MP(f))
        h.update(sharedSecret)
        exchangeHash = h.digest()
        # The server's signature over the exchange hash proves it holds
        # the private half of the host key.
        if not serverKey.verify(signature, exchangeHash):
            self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
                                'bad signature')
            return
        self._keySetup(sharedSecret, exchangeHash)

    def ssh_KEX_DH_GEX_REPLY(self, packet):
        """
        Called when we receive a MSG_KEX_DH_GEX_REPLY message.  Payload::
            string server host key
            integer f (server DH public key)

        We verify the host key by calling verifyHostKey, then continue in
        _continueGEX_REPLY.
        """
        pubKey, packet = getNS(packet)
        f, packet = getMP(packet)
        signature, packet = getNS(packet)
        fingerprint = ':'.join(map(lambda c: '%02x'%ord(c),
                                   md5(pubKey).digest()))
        d = self.verifyHostKey(pubKey, fingerprint)
        d.addCallback(self._continueGEX_REPLY, pubKey, f, signature)
        d.addErrback(
            lambda unused: self.sendDisconnect(
                DISCONNECT_HOST_KEY_NOT_VERIFIABLE, 'bad host key'))
        return d

    def _continueGEX_REPLY(self, ignored, pubKey, f, signature):
        """
        The host key has been verified, so we generate the keys.

        @param pubKey: the public key blob for the server's public key.
        @type pubKey: C{str}
        @param f: the server's Diffie-Hellman public key.
        @type f: C{long}
        @param signature: the server's signature, verifying that it has the
            correct private key.
        @type signature: C{str}
        """
        serverKey = keys.Key.fromString(pubKey)
        sharedSecret = _MPpow(f, self.x, self.p)
        h = sha1()
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(pubKey))
        # The GEX request we sent (ideal size 2048) is part of the hash;
        # matches the literal sent in ssh_KEXINIT.
        h.update('\x00\x00\x08\x00')
        h.update(MP(self.p))
        h.update(MP(self.g))
        h.update(self.e)
        h.update(MP(f))
        h.update(sharedSecret)
        exchangeHash = h.digest()
        if not serverKey.verify(signature, exchangeHash):
            self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
                                'bad signature')
            return
        self._keySetup(sharedSecret, exchangeHash)

    def _keySetup(self, sharedSecret, exchangeHash):
        """
        See SSHTransportBase._keySetup().
        """
        SSHTransportBase._keySetup(self, sharedSecret, exchangeHash)
        # If the server's NEWKEYS arrived before our keys were ready,
        # complete the deferred transition now.
        if self._gotNewKeys:
            self.ssh_NEWKEYS('')

    def ssh_NEWKEYS(self, packet):
        """
        Called when we receive a MSG_NEWKEYS message.  No payload.
        If we've finished setting up our own keys, start using them.
        Otherwise, remember that we've received this message.
        """
        if packet != '':
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR,
                                "NEWKEYS takes no data")
            return
        if not self.nextEncryptions.encBlockSize:
            # Our own key setup isn't done yet; _keySetup() will call us
            # again once it is.
            self._gotNewKeys = 1
            return
        log.msg('NEW KEYS')
        self.currentEncryptions = self.nextEncryptions
        if self.outgoingCompressionType == 'zlib':
            self.outgoingCompression = zlib.compressobj(6)
        if self.incomingCompressionType == 'zlib':
            self.incomingCompression = zlib.decompressobj()
        self.connectionSecure()

    def ssh_SERVICE_ACCEPT(self, packet):
        """
        Called when we receive a MSG_SERVICE_ACCEPT message.  Payload::
            string service name

        Start the service we requested.
        """
        name = getNS(packet)[0]
        if name != self.instance.name:
            self.sendDisconnect(
                DISCONNECT_PROTOCOL_ERROR,
                "received accept for service we did not request")
        # NOTE(review): setService is reached even after the disconnect
        # above (no return/else) -- confirm whether that is intended.
        self.setService(self.instance)

    def requestService(self, instance):
        """
        Request that a service be run over this transport.

        @type instance: subclass of L{twisted.conch.ssh.service.SSHService}
        """
        self.sendPacket(MSG_SERVICE_REQUEST, NS(instance.name))
        self.instance = instance

    # client methods
    def verifyHostKey(self, hostKey, fingerprint):
        """
        Returns a Deferred that gets a callback if it is a valid key, or
        an errback if not.

        Subclasses must override this; the default always fails.

        @type hostKey: C{str}
        @type fingerprint: C{str}
        @rtype: L{twisted.internet.defer.Deferred}
        """
        # return if it's good
        return defer.fail(NotImplementedError())

    def connectionSecure(self):
        """
        Called when the encryption has been set up.  Generally,
        requestService() is called to run another service over the transport.
        """
        raise NotImplementedError()
class _DummyCipher:
"""
A cipher for the none encryption method.
@ivar block_size: the block size of the encryption. In the case of the
none cipher, this is 8 bytes.
"""
block_size = 8
def encrypt(self, x):
return x
decrypt = encrypt
class SSHCiphers:
    """
    SSHCiphers represents all the encryption operations that need to occur
    to encrypt and authenticate the SSH connection.

    @cvar cipherMap: A dictionary mapping SSH encryption names to 3-tuples of
        (<Crypto.Cipher.* name>, <key size>, <counter mode>)
    @cvar macMap: A dictionary mapping SSH MAC names to hash modules.

    @ivar outCipType: the string type of the outgoing cipher.
    @ivar inCipType: the string type of the incoming cipher.
    @ivar outMACType: the string type of the outgoing MAC.
    @ivar inMACType: the string type of the incoming MAC.
    @ivar encBlockSize: the block size of the outgoing cipher.
    @ivar decBlockSize: the block size of the incoming cipher.
    @ivar verifyDigestSize: the size of the incoming MAC.
    @ivar outMAC: a tuple of (<hash module>, <inner key>, <outer key>,
        <digest size>) representing the outgoing MAC.
    @ivar inMAC: see outMAC, but for the incoming MAC.
    """
    cipherMap = {
        '3des-cbc':('DES3', 24, 0),
        'blowfish-cbc':('Blowfish', 16,0 ),
        'aes256-cbc':('AES', 32, 0),
        'aes192-cbc':('AES', 24, 0),
        'aes128-cbc':('AES', 16, 0),
        'cast128-cbc':('CAST', 16, 0),
        'aes128-ctr':('AES', 16, 1),
        'aes192-ctr':('AES', 24, 1),
        'aes256-ctr':('AES', 32, 1),
        '3des-ctr':('DES3', 24, 1),
        'blowfish-ctr':('Blowfish', 16, 1),
        'cast128-ctr':('CAST', 16, 1),
        'none':(None, 0, 0),
    }
    macMap = {
        'hmac-sha1': sha1,
        'hmac-md5': md5,
        'none': None
    }

    def __init__(self, outCip, inCip, outMac, inMac):
        # Only the algorithm *names* are stored here; the actual cipher
        # and MAC objects are created later by setKeys().
        self.outCipType = outCip
        self.inCipType = inCip
        self.outMACType = outMac
        self.inMACType = inMac
        self.encBlockSize = 0
        self.decBlockSize = 0
        self.verifyDigestSize = 0
        self.outMAC = (None, '', '', 0)
        self.inMAC = (None, '', '', 0)

    def setKeys(self, outIV, outKey, inIV, inKey, outInteg, inInteg):
        """
        Set up the ciphers and hashes using the given keys,

        @param outIV: the outgoing initialization vector
        @param outKey: the outgoing encryption key
        @param inIV: the incoming initialization vector
        @param inKey: the incoming encryption key
        @param outInteg: the outgoing integrity key
        @param inInteg: the incoming integrity key.
        """
        # The encrypt/decrypt placeholders defined below are replaced by
        # the bound methods of the concrete cipher objects.
        o = self._getCipher(self.outCipType, outIV, outKey)
        self.encrypt = o.encrypt
        self.encBlockSize = o.block_size
        o = self._getCipher(self.inCipType, inIV, inKey)
        self.decrypt = o.decrypt
        self.decBlockSize = o.block_size
        self.outMAC = self._getMAC(self.outMACType, outInteg)
        self.inMAC = self._getMAC(self.inMACType, inInteg)
        if self.inMAC:
            self.verifyDigestSize = self.inMAC[3]

    def _getCipher(self, cip, iv, key):
        """
        Creates an initialized cipher object.

        @param cip: the name of the cipher: maps into Crypto.Cipher.*
        @param iv: the initialzation vector
        @param key: the encryption key
        """
        modName, keySize, counterMode = self.cipherMap[cip]
        if not modName: # no cipher
            return _DummyCipher()
        mod = __import__('Crypto.Cipher.%s'%modName, {}, {}, 'x')
        if counterMode:
            # CTR mode: the IV seeds a _Counter instance that produces
            # the keystream blocks.
            return mod.new(key[:keySize], mod.MODE_CTR, iv[:mod.block_size],
                           counter=_Counter(iv, mod.block_size))
        else:
            return mod.new(key[:keySize], mod.MODE_CBC, iv[:mod.block_size])

    def _getMAC(self, mac, key):
        """
        Gets a 4-tuple representing the message authentication code.
        (<hash module>, <inner hash value>, <outer hash value>,
        <digest size>)

        @param mac: a key mapping into macMap
        @type mac: C{str}
        @param key: the MAC key.
        @type key: C{str}
        """
        mod = self.macMap[mac]
        if not mod:
            return (None, '', '', 0)
        # Hand-rolled HMAC (RFC 2104) with a fixed 64-byte block size
        # (valid for md5/sha1): truncate the key to the digest size,
        # zero-pad to 64 bytes, then XOR with the ipad/opad constants.
        ds = mod().digest_size
        key = key[:ds] + '\x00' * (64 - ds)
        i = XOR.new('\x36').encrypt(key)
        o = XOR.new('\x5c').encrypt(key)
        return mod, i, o, ds

    def encrypt(self, blocks):
        """
        Encrypt blocks.  Overridden by the encrypt method of a
        Crypto.Cipher.* object in setKeys().

        @type blocks: C{str}
        """
        raise NotImplementedError()

    def decrypt(self, blocks):
        """
        Decrypt blocks.  See encrypt().

        @type blocks: C{str}
        """
        raise NotImplementedError()

    def makeMAC(self, seqid, data):
        """
        Create a message authentication code (MAC) for the given packet using
        the outgoing MAC values.

        @param seqid: the sequence ID of the outgoing packet
        @type seqid: C{int}
        @param data: the data to create a MAC for
        @type data: C{str}
        @rtype: C{str}
        """
        if not self.outMAC[0]:
            return ''
        # The MAC covers the implicit sequence number plus the packet.
        data = struct.pack('>L', seqid) + data
        mod, i, o, ds = self.outMAC
        inner = mod(i + data)
        outer = mod(o + inner.digest())
        return outer.digest()

    def verify(self, seqid, data, mac):
        """
        Verify an incoming MAC using the incoming MAC values.  Return True
        if the MAC is valid.

        @param seqid: the sequence ID of the incoming packet
        @type seqid: C{int}
        @param data: the packet data to verify
        @type data: C{str}
        @param mac: the MAC sent with the packet
        @type mac: C{str}
        @rtype: C{bool}
        """
        if not self.inMAC[0]:
            # No MAC negotiated: only an empty MAC field is acceptable.
            return mac == ''
        data = struct.pack('>L', seqid) + data
        mod, i, o, ds = self.inMAC
        inner = mod(i + data)
        outer = mod(o + inner.digest())
        return mac == outer.digest()
class _Counter:
    """
    Stateful counter which returns results packed in a byte string

    Used as the keystream counter for CTR-mode ciphers: each call returns
    the next counter value as a big-endian byte string of blockSize bytes.
    """
    def __init__(self, initialVector, blockSize):
        """
        @type initialVector: C{str}
        @param initialVector: A byte string representing the initial counter
                              value.
        @type blockSize: C{int}
        @param blockSize: The length of the output buffer, as well as the
            number of bytes at the beginning of C{initialVector} to consider.
        """
        initialVector = initialVector[:blockSize]
        # Prepend an oversized MP-int length header so getMP() consumes the
        # whole (truncated) IV and returns it as a long integer.
        self.count = getMP('\xff\xff\xff\xff' + initialVector)[0]
        self.blockSize = blockSize
        # Store count - 1, zero-padded to blockSize: __call__ increments
        # before returning, so the first value produced equals the IV.
        self.count = Util.number.long_to_bytes(self.count - 1)
        self.count = '\x00' * (self.blockSize - len(self.count)) + self.count
        # 'c' typecode (char array) is Python 2 only.
        self.count = array.array('c', self.count)
        # Index of the least significant byte.
        self.len = len(self.count) - 1

    def __call__(self):
        """
        Increment the counter and return the new value.
        """
        # Big-endian increment with carry, starting at the last byte.
        i = self.len
        while i > -1:
            self.count[i] = n = chr((ord(self.count[i]) + 1) % 256)
            if n == '\x00':
                # byte wrapped: carry into the next more-significant byte
                i -= 1
            else:
                return self.count.tostring()
        # Every byte carried: the counter wrapped around to all zeroes.
        self.count = array.array('c', '\x00' * self.blockSize)
        return self.count.tostring()
# Diffie-Hellman primes from Oakley Group 2 [RFC 2409]
DH_PRIME = long('17976931348623159077083915679378745319786029604875601170644'
'442368419718021615851936894783379586492554150218056548598050364644054819923'
'910005079287700335581663922955313623907650873575991482257486257500742530207'
'744771258955095793777842444242661733472762929938766870920560605027081084290'
'7692932019128194467627007L')
DH_GENERATOR = 2L


# SSH transport-layer message numbers.
MSG_DISCONNECT = 1
MSG_IGNORE = 2
MSG_UNIMPLEMENTED = 3
MSG_DEBUG = 4
MSG_SERVICE_REQUEST = 5
MSG_SERVICE_ACCEPT = 6
MSG_KEXINIT = 20
MSG_NEWKEYS = 21
# Numbers 30 and 31 are deliberately shared between the two key exchange
# methods: KEXDH_* for diffie-hellman-group1-sha1, KEX_DH_GEX_* for
# diffie-hellman-group-exchange-sha1.
MSG_KEXDH_INIT = 30
MSG_KEXDH_REPLY = 31
MSG_KEX_DH_GEX_REQUEST_OLD = 30
MSG_KEX_DH_GEX_REQUEST = 34
MSG_KEX_DH_GEX_GROUP = 31
MSG_KEX_DH_GEX_INIT = 32
MSG_KEX_DH_GEX_REPLY = 33


# Disconnect reason codes for MSG_DISCONNECT.
DISCONNECT_HOST_NOT_ALLOWED_TO_CONNECT = 1
DISCONNECT_PROTOCOL_ERROR = 2
DISCONNECT_KEY_EXCHANGE_FAILED = 3
DISCONNECT_RESERVED = 4
DISCONNECT_MAC_ERROR = 5
DISCONNECT_COMPRESSION_ERROR = 6
DISCONNECT_SERVICE_NOT_AVAILABLE = 7
DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED = 8
DISCONNECT_HOST_KEY_NOT_VERIFIABLE = 9
DISCONNECT_CONNECTION_LOST = 10
DISCONNECT_BY_APPLICATION = 11
DISCONNECT_TOO_MANY_CONNECTIONS = 12
DISCONNECT_AUTH_CANCELLED_BY_USER = 13
DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE = 14
DISCONNECT_ILLEGAL_USER_NAME = 15


# Reverse mapping (number -> name) for logging.  For the numbers shared by
# two MSG_ names (30 and 31) the surviving name depends on dict iteration
# order, so those entries are ambiguous.
messages = {}
for name, value in globals().items():
    if name.startswith('MSG_'):
        messages[value] = name
| apache-2.0 |
Frostman/python-yubico | test/test_yubico.py | 3 | 1657 | #!/usr/bin/env python
#
# Simple test cases for a Python version of the yubikey_crc16() function in ykcrc.c.
#
import struct
import unittest
import yubico.yubico_util as yubico_util
from yubico.yubico_util import crc16
CRC_OK_RESIDUAL=0xf0b8
class TestCRC(unittest.TestCase):
def test_first(self):
""" Test CRC16 trivial case """
buffer = b'\x01\x02\x03\x04'
crc = crc16(buffer)
self.assertEqual(crc, 0xc66e)
return buffer,crc
def test_second(self):
""" Test CRC16 residual calculation """
buffer,crc = self.test_first()
# Append 1st complement for a "self-verifying" block -
# from example in Yubikey low level interface
crc_inv = 0xffff - crc
buffer += struct.pack('<H', crc_inv)
crc2 = crc16(buffer)
self.assertEqual(crc2, CRC_OK_RESIDUAL)
def test_hexdump(self):
""" Test hexdump function, normal use """
bytes = b'\x01\x02\x03\x04\x05\x06\x07\x08'
self.assertEqual(yubico_util.hexdump(bytes, length=4), \
'0000 01 02 03 04\n0004 05 06 07 08\n')
def test_hexdump2(self):
""" Test hexdump function, with colors """
bytes = b'\x01\x02\x03\x04\x05\x06\x07\x08'
self.assertEqual(yubico_util.hexdump(bytes, length=4, colorize=True), \
'0000 \x1b[0m01 02 03\x1b[0m 04\n0004 \x1b[0m05 06 07\x1b[0m 08\n')
def test_modhex_decode(self):
""" Test modhex decoding """
self.assertEqual(b"0123456789abcdef", yubico_util.modhex_decode(b"cbdefghijklnrtuv"))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
Execut3/CTF | IRAN Cert/2016/3- Harder/Web/my awesome shop/Challenge Sources/shop-server/comments/views.py | 1 | 1251 | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from app.models import ShopUser
from comments.models import Comment
def create_new(request):
    """
    Handle POSTed comment creation for the authenticated shop user.

    If creating the comment fails (presumably a uniqueness clash on
    (user, content) -- TODO confirm against the Comment model), the
    existing duplicate is deactivated instead.  Always redirects back to
    the comment listing.
    """
    if request.method == 'POST':
        user = request.user
        # 404 unless the authenticated user corresponds to a ShopUser row.
        get_object_or_404(ShopUser, pk=user.id)
        content = request.POST.get('content', '')
        # (Removed a leftover Python-2 debug statement: `print content`.)
        if content:
            try:
                Comment.objects.create(user=user, content=content)
            except Exception:
                # Narrowed from a bare `except:`; creation failed, so
                # deactivate the already-existing identical comment.
                comment = Comment.objects.get(user=user, content=content)
                comment.active = False
                comment.save()
    return HttpResponseRedirect(reverse('view_comments'))
def view(request):
    """
    Render the comment listing for the authenticated shop user.
    """
    user = request.user
    # 404 unless the authenticated user corresponds to a ShopUser row.
    get_object_or_404(ShopUser, pk=user.id)
    comments = Comment.objects.filter(user=user)
    # locals() is the template context: exposes 'user', 'comments' (and
    # 'request') to comments/view.html -- do not rename these locals.
    return render_to_response('comments/view.html', locals(), RequestContext(request))
def flush(request):
    """
    Delete all of the authenticated shop user's comments, then render the
    comment listing template.
    """
    user = request.user
    get_object_or_404(ShopUser, pk=user.id)
    Comment.objects.delete()
    # NOTE(review): the template is rendered without a 'comments' context
    # variable (only 'user'/'request' are in locals()); a redirect to
    # view_comments may have been intended -- confirm.
    return render_to_response('comments/view.html', locals(), RequestContext(request))
bhmm/bhmm | bhmm/tests/benchmark_hidden.py | 2 | 6242 |
# This file is part of BHMM (Bayesian Hidden Markov Models).
#
# Copyright (c) 2016 Frank Noe (Freie Universitaet Berlin)
# and John D. Chodera (Memorial Sloan-Kettering Cancer Center, New York)
#
# BHMM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
__author__ = 'noe'
import unittest
import numpy as np
import time
from bhmm import hidden
from bhmm.output_models.gaussian import GaussianOutputModel
print_speedup = True
class BenchmarkHidden(object):
def __init__(self, nrep=10, kernel='c'):
self.kernel = kernel
self.nrep = nrep
# variables
self.nexamples = 0
self.A = []
self.pi = []
self.pobs = []
self.T = []
self.N = []
self.alpha = []
self.beta = []
self.gamma = []
self.time_alpha = []
self.time_beta = []
self.time_gamma = []
self.time_c = []
self.time_C = []
self.time_vpath = []
self.alpha_mem = []
self.beta_mem = []
self.gamma_mem = []
self.C_mem = []
# second example
A = np.array([[0.97, 0.02, 0.01],
[0.1, 0.8, 0.1],
[0.01, 0.02, 0.97]])
pi = np.array([0.45, 0.1, 0.45])
T = 1000000
means = np.array([-1.0, 0.0, 1.0])
sigmas = np.array([0.5, 0.5, 0.5])
gom = GaussianOutputModel(3, means=means, sigmas=sigmas)
obs = np.random.randint(3, size=T)
pobs = gom.p_obs(obs)
self.append_example(A, pi, pobs)
def append_example(self, A, pi, pobs):
i = len(self.A)
self.A.append(A)
self.pi.append(pi)
self.pobs.append(pobs)
self.N.append(A.shape[0])
self.T.append(pobs.shape[0])
# compute intermediates
_, alpha, _ = self.run_forward(i, None)
self.alpha.append(alpha)
beta, _ = self.run_backward(i, None)
self.beta.append(beta)
gamma, _ = self.run_gamma(i, None)
self.gamma.append(gamma)
#
self.alpha_mem.append(np.zeros((pobs.shape[0], A.shape[0])))
self.beta_mem.append(np.zeros((pobs.shape[0], A.shape[0])))
self.gamma_mem.append(np.zeros((pobs.shape[0], A.shape[0])))
self.C_mem.append(np.zeros((A.shape[0], A.shape[0])))
self.nexamples += 1
def run_forward(self, i, out):
logprob = 0
alpha = None
hidden.set_implementation(self.kernel)
time1 = time.time()
for k in range(self.nrep):
logprob, alpha = hidden.forward(self.A[i], self.pobs[i], self.pi[i], alpha_out=out)
# compare
time2 = time.time()
d = (time2-time1)/(1.0*self.nrep)
return logprob, alpha, d
def run_backward(self, i, out):
beta = None
hidden.set_implementation(self.kernel)
time1 = time.time()
for k in range(self.nrep):
beta = hidden.backward(self.A[i], self.pobs[i], beta_out=out)
# compare
time2 = time.time()
d = (time2-time1)/(1.0*self.nrep)
return beta, d
def run_gamma(self, i, out):
gamma = None
hidden.set_implementation(self.kernel)
time1 = time.time()
for k in range(self.nrep):
gamma = hidden.state_probabilities(self.alpha[i], self.beta[i], gamma_out=out)
# compare
time2 = time.time()
d = (time2-time1)/(1.0*self.nrep)
return gamma, d
def run_state_counts(self, i, out):
c = None
hidden.set_implementation(self.kernel)
time1 = time.time()
for k in range(self.nrep):
c = hidden.state_counts(self.gamma[i], self.T[i])
# compare
time2 = time.time()
d = (time2-time1)/(1.0*self.nrep)
return c, d
def run_transition_counts(self, i, out):
C = None
hidden.set_implementation(self.kernel)
time1 = time.time()
for k in range(self.nrep):
C = hidden.transition_counts(self.alpha[i], self.beta[i], self.A[i], self.pobs[i], out=out)
# compare
time2 = time.time()
d = (time2-time1) / (1.0*self.nrep)
return C, d
def run_viterbi(self, i, out):
vpath = None
hidden.set_implementation(self.kernel)
time1 = time.time()
for k in range(self.nrep):
vpath = hidden.viterbi(self.A[i], self.pobs[i], self.pi[i])
# compare
time2 = time.time()
d = (time2-time1) / (1.0*self.nrep)
return vpath, d
def run_comp(self, call, outs):
"""
Reference. Just computes the time
"""
for i in range(self.nexamples):
if outs is None:
res = call(i, None)
else:
res = call(i, outs[i])
pkernel = 'mem'
if print_speedup:
print('\t' + str(call.__name__) + '\t Impl = ' + pkernel + ' Time = ' + str(int(1000.0*res[-1])) + ' ms')
def main():
    """
    Run the full benchmark twice: first letting the kernels allocate
    their outputs, then reusing preallocated buffers.
    """
    bh = BenchmarkHidden()
    # from scratch
    bh.run_comp(bh.run_forward, None)
    bh.run_comp(bh.run_backward, None)
    bh.run_comp(bh.run_gamma, None)
    bh.run_comp(bh.run_state_counts, None)
    bh.run_comp(bh.run_transition_counts, None)
    bh.run_comp(bh.run_viterbi, None)
    print()
    # in memory
    bh.run_comp(bh.run_forward, bh.alpha_mem)
    bh.run_comp(bh.run_backward, bh.beta_mem)
    bh.run_comp(bh.run_gamma, bh.gamma_mem)
    bh.run_comp(bh.run_state_counts, None)
    bh.run_comp(bh.run_transition_counts, bh.C_mem)
    bh.run_comp(bh.run_viterbi, None)
# Run the benchmark when invoked as a script.
if __name__ == "__main__":
    main()
| lgpl-3.0 |
invisiblek/python-for-android | python-modules/twisted/twisted/test/test_pb.py | 49 | 56066 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for Perspective Broker module.
TODO: update protocol level tests to use new connection API, leaving
only specific tests for old API.
"""
# issue1195 TODOs: replace pump.pump() with something involving Deferreds.
# Clean up warning suppression.
import sys, os, time, gc
from cStringIO import StringIO
from zope.interface import implements, Interface
from twisted.python.versions import Version
from twisted.trial import unittest
from twisted.spread import pb, util, publish, jelly
from twisted.internet import protocol, main, reactor
from twisted.internet.error import ConnectionRefusedError
from twisted.internet.defer import Deferred, gatherResults, succeed
from twisted.protocols.policies import WrappingFactory
from twisted.python import failure, log
from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials
from twisted.cred import portal, checkers, credentials
class Dummy(pb.Viewable):
    """A Viewable whose view method reports who is looking at it."""
    def view_doNothing(self, user):
        # Only authenticated DummyPerspective avatars get the greeting.
        if not isinstance(user, DummyPerspective):
            return 'goodbye, cruel world!'
        return 'hello world!'
class DummyPerspective(pb.Avatar):
    """
    An L{IPerspective} avatar which will be used in some tests.
    """
    def perspective_getDummyViewPoint(self):
        """Hand back a fresh L{Dummy} viewpoint."""
        viewpoint = Dummy()
        return viewpoint
class DummyRealm(object):
    """Realm handing out L{DummyPerspective} avatars for pb logins."""
    implements(portal.IRealm)
    def requestAvatar(self, avatarId, mind, *interfaces):
        # Answer only requests for pb.IPerspective; implicitly return
        # None (as the original did) when it was not asked for.
        for candidate in interfaces:
            if candidate is not pb.IPerspective:
                continue
            return candidate, DummyPerspective(avatarId), lambda: None
class IOPump:
    """
    Utility to pump data between clients and servers for protocol testing.

    Perhaps this is a utility worthy of being in protocol.py?
    """
    def __init__(self, client, server, clientIO, serverIO):
        self.client = client
        self.server = server
        self.clientIO = clientIO
        self.serverIO = serverIO

    def flush(self):
        """
        Pump until there is no more input or output. This does not run any
        timers, so don't use it with any code that calls reactor.callLater.
        """
        # Failsafe timeout: never spin for more than five seconds.
        deadline = time.time() + 5
        while self.pump():
            if time.time() > deadline:
                return

    def pump(self):
        """
        Move data back and forth.

        Returns whether any data was moved.
        """
        # Drain whatever each side has written so far and reset the
        # buffers for the next round.
        drained = []
        for buf in (self.clientIO, self.serverIO):
            buf.seek(0)
            drained.append(buf.read())
            buf.seek(0)
            buf.truncate()
        clientBytes, serverBytes = drained
        self.client.transport._checkProducer()
        self.server.transport._checkProducer()
        # Deliver one byte at a time to exercise partial-parse paths.
        for byte in clientBytes:
            self.server.dataReceived(byte)
        for byte in serverBytes:
            self.client.dataReceived(byte)
        return 1 if (clientBytes or serverBytes) else 0
def connectedServerAndClient():
    """
    Returns a 3-tuple: (client, server, pump).
    """
    # Server side: a portal over DummyRealm with a single guest account.
    checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(guest='guest')
    serverFactory = pb.PBServerFactory(portal.Portal(DummyRealm(), [checker]))
    serverBroker = serverFactory.buildProtocol(('127.0.0.1',))
    # Client side: a bare broker.
    clientBroker = pb.Broker()
    clientTransport = StringIO()
    serverTransport = StringIO()
    clientBroker.makeConnection(protocol.FileWrapper(clientTransport))
    serverBroker.makeConnection(protocol.FileWrapper(serverTransport))
    pump = IOPump(clientBroker, serverBroker, clientTransport, serverTransport)
    # Run the challenge-response authentication handshake to completion.
    pump.flush()
    return clientBroker, serverBroker, pump
class SimpleRemote(pb.Referenceable):
    """Referenceable with one working and one always-failing method."""
    def remote_thunk(self, arg):
        # Remember the argument for later inspection, then echo its successor.
        self.arg = arg
        return arg + 1
    def remote_knuth(self, arg):
        # Deliberately blow up so error propagation can be tested.
        raise Exception()
class NestedRemote(pb.Referenceable):
    """Referenceable that hands out fresh referenceables."""
    def remote_getSimple(self):
        # A brand-new SimpleRemote for every call.
        simple = SimpleRemote()
        return simple
class SimpleCopy(pb.Copyable):
    """Copyable carrying a fixed int, dict and list for the copy tests."""
    def __init__(self):
        self.x = 1
        self.z = ['test']
        self.y = {"Hello": "World"}
class SimpleLocalCopy(pb.RemoteCopy):
    """Client-side stand-in for L{SimpleCopy}; inherits all behaviour."""
# Register the client-side class used to unjelly SimpleCopy instances.
pb.setUnjellyableForClass(SimpleCopy, SimpleLocalCopy)
class SimpleFactoryCopy(pb.Copyable):
    """
    @cvar allIDs: hold every created instances of this class.
    @type allIDs: C{dict}
    """
    allIDs = {}
    def __init__(self, id):
        # Register every instance by its id so the unjelly factory
        # (createFactoryCopy) can find it again.
        SimpleFactoryCopy.allIDs[id] = self
        self.id = id
def createFactoryCopy(state):
    """
    Factory of L{SimpleFactoryCopy}, getting a created instance given the
    C{id} found in C{state}.
    """
    stateId = state.get("id", None)
    if stateId is None:
        raise RuntimeError("factory copy state has no 'id' member %s" %
                           (repr(state),))
    registry = SimpleFactoryCopy.allIDs
    if stateId not in registry:
        raise RuntimeError("factory class has no ID: %s" %
                           (SimpleFactoryCopy.allIDs,))
    inst = registry[stateId]
    if not inst:
        raise RuntimeError("factory method found no object with id")
    return inst
# Unjelly SimpleFactoryCopy through the factory above instead of a class.
pb.setUnjellyableFactoryForClass(SimpleFactoryCopy, createFactoryCopy)
class NestedCopy(pb.Referenceable):
    """Referenceable handing out Copyable instances of both flavours."""
    def remote_getCopy(self):
        # A plain class-unjellied Copyable.
        copy = SimpleCopy()
        return copy
    def remote_getFactory(self, value):
        # A factory-unjellied Copyable keyed by ``value``.
        factoryCopy = SimpleFactoryCopy(value)
        return factoryCopy
class SimpleCache(pb.Cacheable):
    """
    A trivially stateful Cacheable fixture mirroring L{SimpleCopy}.

    The constructor was originally spelled ``__init___`` (three trailing
    underscores), so it was never invoked and instances carried no state;
    fixed to the real ``__init__`` so the attributes are actually set.
    """
    def __init__(self):
        self.x = 1
        self.y = {"Hello":"World"}
        self.z = ['test']
class NestedComplicatedCache(pb.Referenceable):
    """Referenceable wrapping a single observable cacheable."""
    def __init__(self):
        self.c = VeryVeryComplicatedCacheable()
    def remote_getCache(self):
        # Always the same instance, so cache identity can be verified.
        return self.c
class VeryVeryComplicatedCacheable(pb.Cacheable):
    """Cacheable that pushes an update to its observer via ``setFoo4``."""
    def __init__(self):
        self.x, self.y, self.foo = 1, 2, 3
    def setFoo4(self):
        # Mutate local state and propagate the change to the remote side.
        self.foo = 4
        self.observer.callRemote('foo', 4)
    def getStateToCacheAndObserveFor(self, perspective, observer):
        # Remember the observer so later mutations can be broadcast.
        self.observer = observer
        return {"x": self.x, "y": self.y, "foo": self.foo}
    def stoppedObserving(self, perspective, observer):
        log.msg("stopped observing")
        observer.callRemote("end")
        if observer == self.observer:
            self.observer = None
class RatherBaroqueCache(pb.RemoteCache):
    """Client-side cache for L{VeryVeryComplicatedCacheable}."""
    def observe_foo(self, newFoo):
        # Mirror the server-side foo mutation locally.
        self.foo = newFoo
    def observe_end(self):
        log.msg("the end of things")
# Client-side cache representation for VeryVeryComplicatedCacheable.
pb.setUnjellyableForClass(VeryVeryComplicatedCacheable, RatherBaroqueCache)
class SimpleLocalCache(pb.RemoteCache):
    """Client-side cache for L{SimpleCache} with refcount-probe helpers."""
    def setCopyableState(self, state):
        # Adopt the published state wholesale.
        self.__dict__.update(state)
    def check(self):
        return 1
    def checkMethod(self):
        # Hand out a bound method so im_self refcounting can be probed.
        return self.check
    def checkSelf(self):
        return self
# Client-side cache representation for SimpleCache.
pb.setUnjellyableForClass(SimpleCache, SimpleLocalCache)
class NestedCache(pb.Referenceable):
    """Referenceable exposing one cacheable for identity testing."""
    def __init__(self):
        self.x = SimpleCache()
    def remote_getCache(self):
        # The same cacheable twice: the wire must preserve identity.
        return [self.x] * 2
    def remote_putCache(self, cache):
        # True iff the round-tripped cache is still our very own object.
        return cache is self.x
class Observable(pb.Referenceable):
    """Referenceable maintaining a list of remote observers."""
    def __init__(self):
        self.observers = []
    def remote_observe(self, obs):
        self.observers.append(obs)
    def remote_unobserve(self, obs):
        self.observers.remove(obs)
    def notify(self, obj):
        # Fan the notification out to every registered observer.
        for watcher in self.observers:
            watcher.callRemote('notify', self, obj)
class DeferredRemote(pb.Referenceable):
    """Referenceable whose remote method returns a not-yet-fired Deferred."""
    def __init__(self):
        self.run = 0
    def runMe(self, arg):
        # Record the value the Deferred fired with, return its successor.
        self.run = arg
        return arg + 1
    def dontRunMe(self, arg):
        assert 0, "shouldn't have been run!"
    def remote_doItLater(self):
        """
        Return a L{Deferred} to be fired on client side. When fired,
        C{self.runMe} is called.
        """
        self.d = deferred = Deferred()
        deferred.addCallbacks(self.runMe, self.dontRunMe)
        return deferred
class Observer(pb.Referenceable):
    """Observer that unregisters itself after the first notification."""
    notified = 0
    obj = None
    def remote_notify(self, other, obj):
        self.obj = obj
        self.notified += 1
        # Immediately unsubscribe so further notifications must not arrive.
        other.callRemote('unobserve', self)
class NewStyleCopy(pb.Copyable, pb.RemoteCopy, object):
    """New-style class acting as both Copyable and its own RemoteCopy."""
    def __init__(self, s):
        self.s = s
# NewStyleCopy unjellies as itself on both sides of the wire.
pb.setUnjellyableForClass(NewStyleCopy, NewStyleCopy)
class NewStyleCopy2(pb.Copyable, pb.RemoteCopy, object):
    """Counts allocations/initializations so unjelly behaviour is visible."""
    allocated = 0
    initialized = 0
    value = 1
    def __new__(cls):
        # Every allocation (including unjelly) bumps the counter and
        # stamps the instance with value 2.
        NewStyleCopy2.allocated += 1
        instance = object.__new__(cls)
        instance.value = 2
        return instance
    def __init__(self):
        # Only explicit construction runs __init__; unjelly does not.
        NewStyleCopy2.initialized += 1
# NewStyleCopy2 unjellies as itself; only __new__ runs on receipt.
pb.setUnjellyableForClass(NewStyleCopy2, NewStyleCopy2)
class NewStyleCacheCopy(pb.Cacheable, pb.RemoteCache, object):
    """New-style class acting as both Cacheable and its own RemoteCache."""
    def getStateToCacheAndObserveFor(self, perspective, observer):
        # Publish the whole instance dictionary as the cached state.
        return self.__dict__
# NewStyleCacheCopy arrives on the client as itself.
pb.setUnjellyableForClass(NewStyleCacheCopy, NewStyleCacheCopy)
class Echoer(pb.Root):
    """Root object that echoes whatever it is sent."""
    def remote_echo(self, st):
        # Return the argument unchanged.
        return st
class CachedReturner(pb.Root):
    """Root object that always hands back one fixed cacheable."""
    def __init__(self, cache):
        self.cache = cache
    def remote_giveMeCache(self, st):
        # The argument is ignored; the construction-time cache is returned.
        return self.cache
class NewStyleTestCase(unittest.TestCase):
    """
    Tests that new-style (object-derived) Copyable instances round-trip
    correctly over a real PB connection.
    """
    def setUp(self):
        """
        Create a pb server using L{Echoer} protocol and connect a client to it.
        """
        self.serverFactory = pb.PBServerFactory(Echoer())
        # Wrap the factory so tearDown can reach the server-side protocols.
        self.wrapper = WrappingFactory(self.serverFactory)
        self.server = reactor.listenTCP(0, self.wrapper)
        clientFactory = pb.PBClientFactory()
        reactor.connectTCP("localhost", self.server.getHost().port,
                           clientFactory)
        def gotRoot(ref):
            self.ref = ref
        return clientFactory.getRootObject().addCallback(gotRoot)
    def tearDown(self):
        """
        Close client and server connections, reset values of L{NewStyleCopy2}
        class variables.
        """
        NewStyleCopy2.allocated = 0
        NewStyleCopy2.initialized = 0
        NewStyleCopy2.value = 1
        self.ref.broker.transport.loseConnection()
        # Disconnect any server-side connections too.
        for proto in self.wrapper.protocols:
            proto.transport.loseConnection()
        return self.server.stopListening()
    def test_newStyle(self):
        """
        Create a new style object, send it over the wire, and check the result.
        """
        orig = NewStyleCopy("value")
        d = self.ref.callRemote("echo", orig)
        def cb(res):
            self.failUnless(isinstance(res, NewStyleCopy))
            self.failUnlessEqual(res.s, "value")
            self.failIf(res is orig) # no cheating :)
        d.addCallback(cb)
        return d
    def test_alloc(self):
        """
        Send a new style object and check the number of allocations.
        """
        orig = NewStyleCopy2()
        self.failUnlessEqual(NewStyleCopy2.allocated, 1)
        self.failUnlessEqual(NewStyleCopy2.initialized, 1)
        d = self.ref.callRemote("echo", orig)
        def cb(res):
            # receiving the response creates a third one on the way back
            self.failUnless(isinstance(res, NewStyleCopy2))
            self.failUnlessEqual(res.value, 2)
            self.failUnlessEqual(NewStyleCopy2.allocated, 3)
            self.failUnlessEqual(NewStyleCopy2.initialized, 1)
            self.failIf(res is orig) # no cheating :)
        # sending the object creates a second one on the far side
        d.addCallback(cb)
        return d
class ConnectionNotifyServerFactory(pb.PBServerFactory):
    """
    A server factory which stores the last connection and fires a
    L{Deferred} on connection made. This factory can handle only one
    client connection.

    @ivar protocolInstance: the last protocol instance.
    @type protocolInstance: C{pb.Broker}

    @ivar connectionMade: the deferred fired upon connection.
    @type connectionMade: C{Deferred}
    """
    protocolInstance = None

    def __init__(self, root):
        """
        Initialize the factory.
        """
        pb.PBServerFactory.__init__(self, root)
        self.connectionMade = Deferred()

    def clientConnectionMade(self, protocol):
        """
        Store the protocol and fire the connection deferred.
        """
        self.protocolInstance = protocol
        # Fire the waiter exactly once; subsequent connections are ignored.
        waiter, self.connectionMade = self.connectionMade, None
        if waiter is not None:
            waiter.callback(None)
class NewStyleCachedTestCase(unittest.TestCase):
    """
    Tests that new-style Cacheable/RemoteCache instances round-trip over a
    real TCP connection.
    """
    def setUp(self):
        """
        Create a pb server using L{CachedReturner} protocol and connect a
        client to it.
        """
        self.orig = NewStyleCacheCopy()
        self.orig.s = "value"
        self.server = reactor.listenTCP(0,
            ConnectionNotifyServerFactory(CachedReturner(self.orig)))
        clientFactory = pb.PBClientFactory()
        reactor.connectTCP("localhost", self.server.getHost().port,
                           clientFactory)
        def gotRoot(ref):
            self.ref = ref
        d1 = clientFactory.getRootObject().addCallback(gotRoot)
        # Wait for both the client root and the server-side connection.
        d2 = self.server.factory.connectionMade
        return gatherResults([d1, d2])
    def tearDown(self):
        """
        Close client and server connections.
        """
        self.server.factory.protocolInstance.transport.loseConnection()
        self.ref.broker.transport.loseConnection()
        return self.server.stopListening()
    def test_newStyleCache(self):
        """
        Get the object from the cache, and checks its properties.
        """
        d = self.ref.callRemote("giveMeCache", self.orig)
        def cb(res):
            self.failUnless(isinstance(res, NewStyleCacheCopy))
            self.failUnlessEqual(res.s, "value")
            self.failIf(res is self.orig) # no cheating :)
        d.addCallback(cb)
        return d
class BrokerTestCase(unittest.TestCase):
    """
    Protocol-level broker tests over an in-memory connection built by
    L{connectedServerAndClient}; data moves only when the pump is pumped,
    so the exact pump counts below are significant.
    """
    # Slot filled in by thunkResultGood for later assertions.
    thunkResult = None
    def tearDown(self):
        try:
            # from RemotePublished.getFileName
            os.unlink('None-None-TESTING.pub')
        except OSError:
            pass
    def thunkErrorBad(self, error):
        # Errback that fails the test: an error was not expected here.
        self.fail("This should cause a return value, not %s" % (error,))
    def thunkResultGood(self, result):
        # Callback recording the result in self.thunkResult.
        self.thunkResult = result
    def thunkErrorGood(self, tb):
        pass
    def thunkResultBad(self, result):
        # Callback that fails the test: an error was expected here.
        self.fail("This should cause an error, not %s" % (result,))
    def test_reference(self):
        c, s, pump = connectedServerAndClient()
        class X(pb.Referenceable):
            def remote_catch(self,arg):
                self.caught = arg
        class Y(pb.Referenceable):
            def remote_throw(self, a, b):
                a.callRemote('catch', b)
        s.setNameForLocal("y", Y())
        y = c.remoteForName("y")
        x = X()
        z = X()
        y.callRemote('throw', x, z)
        pump.pump()
        pump.pump()
        pump.pump()
        self.assertIdentical(x.caught, z, "X should have caught Z")
        # make sure references to remote methods are equals
        self.assertEquals(y.remoteMethod('throw'), y.remoteMethod('throw'))
    def test_result(self):
        c, s, pump = connectedServerAndClient()
        for x, y in (c, s), (s, c):
            # test reflexivity
            foo = SimpleRemote()
            x.setNameForLocal("foo", foo)
            bar = y.remoteForName("foo")
            self.expectedThunkResult = 8
            bar.callRemote('thunk',self.expectedThunkResult - 1
                ).addCallbacks(self.thunkResultGood, self.thunkErrorBad)
            # Send question.
            pump.pump()
            # Send response.
            pump.pump()
            # Shouldn't require any more pumping than that...
            self.assertEquals(self.thunkResult, self.expectedThunkResult,
                              "result wasn't received.")
    def refcountResult(self, result):
        # Holds the RemoteReference so test_refcount controls its lifetime.
        self.nestedRemote = result
    def test_tooManyRefs(self):
        l = []
        e = []
        c, s, pump = connectedServerAndClient()
        foo = NestedRemote()
        s.setNameForLocal("foo", foo)
        x = c.remoteForName("foo")
        for igno in xrange(pb.MAX_BROKER_REFS + 10):
            if s.transport.closed or c.transport.closed:
                break
            x.callRemote("getSimple").addCallbacks(l.append, e.append)
            pump.pump()
        expected = (pb.MAX_BROKER_REFS - 1)
        self.assertTrue(s.transport.closed, "transport was not closed")
        self.assertEquals(len(l), expected,
                          "expected %s got %s" % (expected, len(l)))
    def test_copy(self):
        c, s, pump = connectedServerAndClient()
        foo = NestedCopy()
        s.setNameForLocal("foo", foo)
        x = c.remoteForName("foo")
        x.callRemote('getCopy'
            ).addCallbacks(self.thunkResultGood, self.thunkErrorBad)
        pump.pump()
        pump.pump()
        self.assertEquals(self.thunkResult.x, 1)
        self.assertEquals(self.thunkResult.y['Hello'], 'World')
        self.assertEquals(self.thunkResult.z[0], 'test')
    def test_observe(self):
        c, s, pump = connectedServerAndClient()
        # this is really testing the comparison between remote objects, to make
        # sure that you can *UN*observe when you have an observer architecture.
        a = Observable()
        b = Observer()
        s.setNameForLocal("a", a)
        ra = c.remoteForName("a")
        ra.callRemote('observe',b)
        pump.pump()
        a.notify(1)
        pump.pump()
        pump.pump()
        a.notify(10)
        pump.pump()
        pump.pump()
        self.assertNotIdentical(b.obj, None, "didn't notify")
        self.assertEquals(b.obj, 1, 'notified too much')
    def test_defer(self):
        c, s, pump = connectedServerAndClient()
        d = DeferredRemote()
        s.setNameForLocal("d", d)
        e = c.remoteForName("d")
        pump.pump(); pump.pump()
        results = []
        e.callRemote('doItLater').addCallback(results.append)
        pump.pump(); pump.pump()
        self.assertFalse(d.run, "Deferred method run too early.")
        d.d.callback(5)
        self.assertEquals(d.run, 5, "Deferred method run too late.")
        pump.pump(); pump.pump()
        self.assertEquals(results[0], 6, "Incorrect result.")
    def test_refcount(self):
        c, s, pump = connectedServerAndClient()
        foo = NestedRemote()
        s.setNameForLocal("foo", foo)
        bar = c.remoteForName("foo")
        bar.callRemote('getSimple'
            ).addCallbacks(self.refcountResult, self.thunkErrorBad)
        # send question
        pump.pump()
        # send response
        pump.pump()
        # delving into internal structures here, because GC is sort of
        # inherently internal.
        rluid = self.nestedRemote.luid
        self.assertIn(rluid, s.localObjects)
        del self.nestedRemote
        # nudge the gc
        if sys.hexversion >= 0x2000000:
            gc.collect()
        # try to nudge the GC even if we can't really
        pump.pump()
        pump.pump()
        pump.pump()
        self.assertNotIn(rluid, s.localObjects)
    def test_cache(self):
        c, s, pump = connectedServerAndClient()
        obj = NestedCache()
        obj2 = NestedComplicatedCache()
        vcc = obj2.c
        s.setNameForLocal("obj", obj)
        s.setNameForLocal("xxx", obj2)
        o2 = c.remoteForName("obj")
        o3 = c.remoteForName("xxx")
        coll = []
        o2.callRemote("getCache"
            ).addCallback(coll.append).addErrback(coll.append)
        o2.callRemote("getCache"
            ).addCallback(coll.append).addErrback(coll.append)
        complex = []
        o3.callRemote("getCache").addCallback(complex.append)
        o3.callRemote("getCache").addCallback(complex.append)
        pump.flush()
        # `worst things first'
        self.assertEquals(complex[0].x, 1)
        self.assertEquals(complex[0].y, 2)
        self.assertEquals(complex[0].foo, 3)
        vcc.setFoo4()
        pump.flush()
        self.assertEquals(complex[0].foo, 4)
        self.assertEquals(len(coll), 2)
        cp = coll[0][0]
        self.assertIdentical(cp.checkMethod().im_self, cp,
                             "potential refcounting issue")
        self.assertIdentical(cp.checkSelf(), cp,
                             "other potential refcounting issue")
        col2 = []
        o2.callRemote('putCache',cp).addCallback(col2.append)
        pump.flush()
        # The objects were the same (testing lcache identity)
        self.assertTrue(col2[0])
        # test equality of references to methods
        self.assertEquals(o2.remoteMethod("getCache"),
                          o2.remoteMethod("getCache"))
        # now, refcounting (similiar to testRefCount)
        luid = cp.luid
        baroqueLuid = complex[0].luid
        self.assertIn(luid, s.remotelyCachedObjects,
                      "remote cache doesn't have it")
        del coll
        del cp
        pump.flush()
        del complex
        del col2
        # extra nudge...
        pump.flush()
        # del vcc.observer
        # nudge the gc
        if sys.hexversion >= 0x2000000:
            gc.collect()
        # try to nudge the GC even if we can't really
        pump.flush()
        # The GC is done with it.
        self.assertNotIn(luid, s.remotelyCachedObjects,
                         "Server still had it after GC")
        self.assertNotIn(luid, c.locallyCachedObjects,
                         "Client still had it after GC")
        self.assertNotIn(baroqueLuid, s.remotelyCachedObjects,
                         "Server still had complex after GC")
        self.assertNotIn(baroqueLuid, c.locallyCachedObjects,
                         "Client still had complex after GC")
        self.assertIdentical(vcc.observer, None, "observer was not removed")
    def test_publishable(self):
        try:
            os.unlink('None-None-TESTING.pub') # from RemotePublished.getFileName
        except OSError:
            pass # Sometimes it's not there.
        c, s, pump = connectedServerAndClient()
        foo = GetPublisher()
        # foo.pub.timestamp = 1.0
        s.setNameForLocal("foo", foo)
        bar = c.remoteForName("foo")
        accum = []
        bar.callRemote('getPub').addCallbacks(accum.append, self.thunkErrorBad)
        pump.flush()
        obj = accum.pop()
        self.assertEquals(obj.activateCalled, 1)
        self.assertEquals(obj.isActivated, 1)
        self.assertEquals(obj.yayIGotPublished, 1)
        # timestamp's dirty, we don't have a cache file
        self.assertEquals(obj._wasCleanWhenLoaded, 0)
        c, s, pump = connectedServerAndClient()
        s.setNameForLocal("foo", foo)
        bar = c.remoteForName("foo")
        bar.callRemote('getPub').addCallbacks(accum.append, self.thunkErrorBad)
        pump.flush()
        obj = accum.pop()
        # timestamp's clean, our cache file is up-to-date
        self.assertEquals(obj._wasCleanWhenLoaded, 1)
    def gotCopy(self, val):
        # Record only the id of the received factory copy.
        self.thunkResult = val.id
    def test_factoryCopy(self):
        c, s, pump = connectedServerAndClient()
        ID = 99
        obj = NestedCopy()
        s.setNameForLocal("foo", obj)
        x = c.remoteForName("foo")
        x.callRemote('getFactory', ID
            ).addCallbacks(self.gotCopy, self.thunkResultBad)
        pump.pump()
        pump.pump()
        pump.pump()
        self.assertEquals(self.thunkResult, ID,
                          "ID not correct on factory object %s" % (self.thunkResult,))
# Fixture data for the paging tests: a payload large enough to span
# multiple pages, plus globals recording the completion callback's args.
bigString = "helloworld" * 50
callbackArgs = None
callbackKeyword = None
def finishedCallback(*args, **kw):
    """Record the paging-completion arguments in the module globals."""
    global callbackArgs, callbackKeyword
    callbackArgs, callbackKeyword = args, kw
class Pagerizer(pb.Referenceable):
    """Referenceable that pages out ``bigString`` in 100-byte chunks."""
    def __init__(self, callback, *args, **kw):
        self.callback, self.args, self.kw = callback, args, kw
    def remote_getPages(self, collector):
        util.StringPager(collector, bigString, 100,
                         self.callback, *self.args, **self.kw)
        # One-shot: drop the saved arguments after first use.
        self.args = self.kw = None
class FilePagerizer(pb.Referenceable):
    """Referenceable that pages out the contents of a file on request."""
    pager = None
    def __init__(self, filename, callback, *args, **kw):
        self.filename = filename
        self.callback, self.args, self.kw = callback, args, kw
    def remote_getPages(self, collector):
        # Keep a reference to the pager so tests can inspect its buffers.
        self.pager = util.FilePager(collector, file(self.filename),
                                    self.callback, *self.args, **self.kw)
        # One-shot: drop the saved arguments after first use.
        self.args = self.kw = None
class PagingTestCase(unittest.TestCase):
    """
    Test pb objects sending data by pages.
    """
    def setUp(self):
        """
        Create a file used to test L{util.FilePager}.
        """
        self.filename = self.mktemp()
        fd = file(self.filename, 'w')
        fd.write(bigString)
        fd.close()
    def test_pagingWithCallback(self):
        """
        Test L{util.StringPager}, passing a callback to fire when all pages
        are sent.
        """
        c, s, pump = connectedServerAndClient()
        s.setNameForLocal("foo", Pagerizer(finishedCallback, 'hello', value=10))
        x = c.remoteForName("foo")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        # Keep pumping until the full payload has arrived.
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")
        self.assertEquals(callbackArgs, ('hello',),
                          "Completed callback not invoked")
        self.assertEquals(callbackKeyword, {'value': 10},
                          "Completed callback not invoked")
    def test_pagingWithoutCallback(self):
        """
        Test L{util.StringPager} without a callback.
        """
        c, s, pump = connectedServerAndClient()
        s.setNameForLocal("foo", Pagerizer(None))
        x = c.remoteForName("foo")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")
    def test_emptyFilePaging(self):
        """
        Test L{util.FilePager}, sending an empty file.
        """
        filenameEmpty = self.mktemp()
        fd = file(filenameEmpty, 'w')
        fd.close()
        c, s, pump = connectedServerAndClient()
        pagerizer = FilePagerizer(filenameEmpty, None)
        s.setNameForLocal("bar", pagerizer)
        x = c.remoteForName("bar")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        # Bound the pump loop: an empty transfer should finish quickly.
        ttl = 10
        while not l and ttl > 0:
            pump.pump()
            ttl -= 1
        if not ttl:
            self.fail('getAllPages timed out')
        self.assertEquals(''.join(l[0]), '',
                          "Pages received not equal to pages sent!")
    def test_filePagingWithCallback(self):
        """
        Test L{util.FilePager}, passing a callback to fire when all pages
        are sent, and verify that the pager doesn't keep chunks in memory.
        """
        c, s, pump = connectedServerAndClient()
        pagerizer = FilePagerizer(self.filename, finishedCallback,
                                  'frodo', value = 9)
        s.setNameForLocal("bar", pagerizer)
        x = c.remoteForName("bar")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")
        self.assertEquals(callbackArgs, ('frodo',),
                          "Completed callback not invoked")
        self.assertEquals(callbackKeyword, {'value': 9},
                          "Completed callback not invoked")
        self.assertEquals(pagerizer.pager.chunks, [])
    def test_filePagingWithoutCallback(self):
        """
        Test L{util.FilePager} without a callback.
        """
        c, s, pump = connectedServerAndClient()
        pagerizer = FilePagerizer(self.filename, None)
        s.setNameForLocal("bar", pagerizer)
        x = c.remoteForName("bar")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")
        self.assertEquals(pagerizer.pager.chunks, [])
class DumbPublishable(publish.Publishable):
    """Publishable exposing a single marker attribute."""
    def getStateToPublish(self):
        # The marker the tests assert on after publication.
        return {"yayIGotPublished": 1}
class DumbPub(publish.RemotePublished):
    """Client-side representation of L{DumbPublishable}."""
    def activated(self):
        # Flag that activation happened, for the tests to assert on.
        self.activateCalled = 1
class GetPublisher(pb.Referenceable):
    """Referenceable serving a single publishable named "TESTING"."""
    def __init__(self):
        self.pub = DumbPublishable("TESTING")
    def remote_getPub(self):
        # Always the same publishable instance.
        return self.pub
# DumbPublishable arrives on the client as a DumbPub.
pb.setUnjellyableForClass(DumbPublishable, DumbPub)
class DisconnectionTestCase(unittest.TestCase):
    """
    Test disconnection callbacks.
    """
    def error(self, *args):
        # Sentinel callback: must never actually fire.
        raise RuntimeError("I shouldn't have been called: %s" % (args,))
    def gotDisconnected(self):
        """
        Called on broker disconnect.
        """
        self.gotCallback = 1
    def objectDisconnected(self, o):
        """
        Called on RemoteReference disconnect.
        """
        self.assertEquals(o, self.remoteObject)
        self.objectCallback = 1
    def test_badSerialization(self):
        # A Copyable whose getStateToCopyFor raises must produce exactly
        # one errback on the caller's side.
        c, s, pump = connectedServerAndClient()
        pump.pump()
        s.setNameForLocal("o", BadCopySet())
        g = c.remoteForName("o")
        l = []
        g.callRemote("setBadCopy", BadCopyable()).addErrback(l.append)
        pump.flush()
        self.assertEquals(len(l), 1)
    def test_disconnection(self):
        c, s, pump = connectedServerAndClient()
        pump.pump()
        s.setNameForLocal("o", SimpleRemote())
        # get a client reference to server object
        r = c.remoteForName("o")
        pump.pump()
        pump.pump()
        pump.pump()
        # register and then unregister disconnect callbacks
        # making sure they get unregistered
        c.notifyOnDisconnect(self.error)
        self.assertIn(self.error, c.disconnects)
        c.dontNotifyOnDisconnect(self.error)
        self.assertNotIn(self.error, c.disconnects)
        r.notifyOnDisconnect(self.error)
        self.assertIn(r._disconnected, c.disconnects)
        self.assertIn(self.error, r.disconnectCallbacks)
        r.dontNotifyOnDisconnect(self.error)
        self.assertNotIn(r._disconnected, c.disconnects)
        self.assertNotIn(self.error, r.disconnectCallbacks)
        # register disconnect callbacks
        c.notifyOnDisconnect(self.gotDisconnected)
        r.notifyOnDisconnect(self.objectDisconnected)
        self.remoteObject = r
        # disconnect
        c.connectionLost(failure.Failure(main.CONNECTION_DONE))
        self.assertTrue(self.gotCallback)
        self.assertTrue(self.objectCallback)
class FreakOut(Exception):
    """Raised by L{BadCopyable} to simulate a serialization failure."""
class BadCopyable(pb.Copyable):
    """Copyable that always fails to serialize."""
    def getStateToCopyFor(self, p):
        # Blow up during jelly so send-side error handling is exercised.
        raise FreakOut()
class BadCopySet(pb.Referenceable):
    """Referenceable accepting (and discarding) a BadCopyable argument."""
    def remote_setBadCopy(self, bc):
        # Serialization of the argument is what fails, not this method.
        return None
class LocalRemoteTest(util.LocalAsRemote):
    """
    Fixture exposing synchronous and 'asynchronous' methods through
    L{util.LocalAsRemote}; the sync_/async_ name prefixes drive dispatch.
    """
    # Suppress traceback reporting for the deliberate failure below.
    reportAllTracebacks = 0
    def sync_add1(self, x):
        # Synchronous increment.
        return x + 1
    def async_add(self, x=0, y=1):
        # Addition exposed through the asynchronous path.
        return x + y
    def async_fail(self):
        # Always fails, for error-path tests.
        raise RuntimeError()
class MyPerspective(pb.Avatar):
    """
    @ivar loggedIn: set to C{True} when the avatar is logged in.
    @type loggedIn: C{bool}

    @ivar loggedOut: set to C{True} when the avatar is logged out.
    @type loggedOut: C{bool}
    """
    implements(pb.IPerspective)
    loggedIn = loggedOut = False

    def __init__(self, avatarId):
        self.avatarId = avatarId

    def perspective_getAvatarId(self):
        """
        Return the avatar identifier which was used to access this avatar.
        """
        return self.avatarId

    def perspective_getViewPoint(self):
        # Hand out a Viewable so view_* dispatch can be exercised.
        return MyView()

    def perspective_add(self, a, b):
        """
        Add the given objects and return the result. This is a method
        unavailable on L{Echoer}, so it can only be invoked by authenticated
        users who received their avatar from L{TestRealm}.
        """
        return a + b

    def logout(self):
        # Record logout so tests can assert the portal called us back.
        self.loggedOut = True
class TestRealm(object):
    """
    A realm which repeatedly gives out a single instance of L{MyPerspective}
    for non-anonymous logins and which gives out a new instance of L{Echoer}
    for each anonymous login.

    @ivar lastPerspective: The L{MyPerspective} most recently created and
        returned from C{requestAvatar}.

    @ivar perspectiveFactory: A one-argument callable which will be used to
        create avatars to be returned from C{requestAvatar}.
    """
    perspectiveFactory = MyPerspective
    lastPerspective = None

    def requestAvatar(self, avatarId, mind, interface):
        """
        Verify that the mind and interface supplied have the expected values
        (this should really be done somewhere else, like inside a test method)
        and return an avatar appropriate for the given identifier.
        """
        assert interface == pb.IPerspective
        assert mind == "BRAINS!"
        if avatarId is checkers.ANONYMOUS:
            # Anonymous logins get a fresh Echoer and a no-op logout.
            return pb.IPerspective, Echoer(), lambda: None
        avatar = self.perspectiveFactory(avatarId)
        avatar.loggedIn = True
        self.lastPerspective = avatar
        return pb.IPerspective, avatar, avatar.logout
class MyView(pb.Viewable):
    """Viewable whose check method identifies the viewing perspective."""
    def view_check(self, user):
        # True only when viewed by an authenticated MyPerspective.
        return isinstance(user, MyPerspective)
class NewCredTestCase(unittest.TestCase):
    """
    Tests related to the L{twisted.cred} support in PB.
    """
    def setUp(self):
        """
        Create a portal with no checkers and wrap it around a simple test
        realm. Set up a PB server on a TCP port which serves perspectives
        using that portal.
        """
        self.realm = TestRealm()
        self.portal = portal.Portal(self.realm)
        self.factory = ConnectionNotifyServerFactory(self.portal)
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.portno = self.port.getHost().port
    def tearDown(self):
        """
        Shut down the TCP port created by L{setUp}.
        """
        return self.port.stopListening()
    def getFactoryAndRootObject(self, clientFactory=pb.PBClientFactory):
        """
        Create a connection to the test server.

        @param clientFactory: the factory class used to create the connection.

        @return: a tuple (C{factory}, C{deferred}), where factory is an
            instance of C{clientFactory} and C{deferred} the L{Deferred} firing
            with the PB root object.
        """
        factory = clientFactory()
        rootObjDeferred = factory.getRootObject()
        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return factory, rootObjDeferred
    def test_getRootObject(self):
        """
        Assert only that L{PBClientFactory.getRootObject}'s Deferred fires with
        a L{RemoteReference}.
        """
        factory, rootObjDeferred = self.getFactoryAndRootObject()
        def gotRootObject(rootObj):
            self.assertIsInstance(rootObj, pb.RemoteReference)
            disconnectedDeferred = Deferred()
            rootObj.notifyOnDisconnect(disconnectedDeferred.callback)
            factory.disconnect()
            return disconnectedDeferred
        return rootObjDeferred.addCallback(gotRootObject)
    def test_deadReferenceError(self):
        """
        Test that when a connection is lost, calling a method on a
        RemoteReference obtained from it raises DeadReferenceError.
        """
        factory, rootObjDeferred = self.getFactoryAndRootObject()
        def gotRootObject(rootObj):
            disconnectedDeferred = Deferred()
            rootObj.notifyOnDisconnect(disconnectedDeferred.callback)
            def lostConnection(ign):
                self.assertRaises(
                    pb.DeadReferenceError,
                    rootObj.callRemote, 'method')
            disconnectedDeferred.addCallback(lostConnection)
            factory.disconnect()
            return disconnectedDeferred
        return rootObjDeferred.addCallback(gotRootObject)
    def test_clientConnectionLost(self):
        """
        Test that if the L{reconnecting} flag is passed with a True value then
        a remote call made from a disconnection notification callback gets a
        result successfully.
        """
        class ReconnectOnce(pb.PBClientFactory):
            # Reconnect exactly once, then behave like a normal factory.
            reconnectedAlready = False
            def clientConnectionLost(self, connector, reason):
                reconnecting = not self.reconnectedAlready
                self.reconnectedAlready = True
                if reconnecting:
                    connector.connect()
                return pb.PBClientFactory.clientConnectionLost(
                    self, connector, reason, reconnecting)
        factory, rootObjDeferred = self.getFactoryAndRootObject(ReconnectOnce)
        def gotRootObject(rootObj):
            self.assertIsInstance(rootObj, pb.RemoteReference)
            d = Deferred()
            rootObj.notifyOnDisconnect(d.callback)
            factory.disconnect()
            def disconnected(ign):
                d = factory.getRootObject()
                def gotAnotherRootObject(anotherRootObj):
                    self.assertIsInstance(anotherRootObj, pb.RemoteReference)
                    d = Deferred()
                    anotherRootObj.notifyOnDisconnect(d.callback)
                    factory.disconnect()
                    return d
                return d.addCallback(gotAnotherRootObject)
            return d.addCallback(disconnected)
        return rootObjDeferred.addCallback(gotRootObject)
    def test_immediateClose(self):
        """
        Test that if a Broker loses its connection without receiving any bytes,
        it doesn't raise any exceptions or log any errors.
        """
        serverProto = self.factory.buildProtocol(('127.0.0.1', 12345))
        serverProto.makeConnection(protocol.FileWrapper(StringIO()))
        serverProto.connectionLost(failure.Failure(main.CONNECTION_DONE))
    def test_loginConnectionRefused(self):
        """
        L{PBClientFactory.login} returns a L{Deferred} which is errbacked
        with the L{ConnectionRefusedError} if the underlying connection is
        refused.
        """
        clientFactory = pb.PBClientFactory()
        loginDeferred = clientFactory.login(
            credentials.UsernamePassword("foo", "bar"))
        clientFactory.clientConnectionFailed(
            None,
            failure.Failure(
                ConnectionRefusedError("Test simulated refused connection")))
        return self.assertFailure(loginDeferred, ConnectionRefusedError)
    def _disconnect(self, ignore, factory):
        """
        Helper method disconnecting the given client factory and returning a
        C{Deferred} that will fire when the server connection has noticed the
        disconnection.
        """
        disconnectedDeferred = Deferred()
        self.factory.protocolInstance.notifyOnDisconnect(
            lambda: disconnectedDeferred.callback(None))
        factory.disconnect()
        return disconnectedDeferred
    def test_loginLogout(self):
        """
        Test that login can be performed with IUsernamePassword credentials and
        that when the connection is dropped the avatar is logged out.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        creds = credentials.UsernamePassword("user", "pass")
        # NOTE: real code probably won't need anything where we have the
        # "BRAINS!" argument, passing None is fine. We just do it here to
        # test that it is being passed. It is used to give additional info to
        # the realm to aid perspective creation, if you don't need that,
        # ignore it.
        mind = "BRAINS!"
        d = factory.login(creds, mind)
        def cbLogin(perspective):
            self.assertTrue(self.realm.lastPerspective.loggedIn)
            self.assertIsInstance(perspective, pb.RemoteReference)
            return self._disconnect(None, factory)
        d.addCallback(cbLogin)
        def cbLogout(ignored):
            self.assertTrue(self.realm.lastPerspective.loggedOut)
        d.addCallback(cbLogout)
        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
    def test_logoutAfterDecref(self):
        """
        If a L{RemoteReference} to an L{IPerspective} avatar is decrefed and
        there remain no other references to the avatar on the server, the
        avatar is garbage collected and the logout method called.
        """
        loggedOut = Deferred()
        class EventPerspective(pb.Avatar):
            """
            An avatar which fires a Deferred when it is logged out.
            """
            def __init__(self, avatarId):
                pass
            def logout(self):
                loggedOut.callback(None)
        self.realm.perspectiveFactory = EventPerspective
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(foo='bar'))
        factory = pb.PBClientFactory()
        d = factory.login(
            credentials.UsernamePassword('foo', 'bar'), "BRAINS!")
        def cbLoggedIn(avatar):
            # Just wait for the logout to happen, as it should since the
            # reference to the avatar will shortly no longer exists.
            return loggedOut
        d.addCallback(cbLoggedIn)
        def cbLoggedOut(ignored):
            # Verify that the server broker's _localCleanup dict isn't growing
            # without bound.
            self.assertEqual(self.factory.protocolInstance._localCleanup, {})
        d.addCallback(cbLoggedOut)
        d.addCallback(self._disconnect, factory)
        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
    def test_concurrentLogin(self):
        """
        Two different correct login attempts can be made on the same root
        object at the same time and produce two different resulting avatars.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(
                foo='bar', baz='quux'))
        factory = pb.PBClientFactory()
        firstLogin = factory.login(
            credentials.UsernamePassword('foo', 'bar'), "BRAINS!")
        secondLogin = factory.login(
            credentials.UsernamePassword('baz', 'quux'), "BRAINS!")
        d = gatherResults([firstLogin, secondLogin])
        def cbLoggedIn((first, second)):
            return gatherResults([
                first.callRemote('getAvatarId'),
                second.callRemote('getAvatarId')])
        d.addCallback(cbLoggedIn)
        def cbAvatarIds((first, second)):
            self.assertEqual(first, 'foo')
            self.assertEqual(second, 'baz')
        d.addCallback(cbAvatarIds)
        d.addCallback(self._disconnect, factory)
        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
def test_badUsernamePasswordLogin(self):
"""
Test that a login attempt with an invalid user or invalid password
fails in the appropriate way.
"""
self.portal.registerChecker(
checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
factory = pb.PBClientFactory()
firstLogin = factory.login(
credentials.UsernamePassword('nosuchuser', 'pass'))
secondLogin = factory.login(
credentials.UsernamePassword('user', 'wrongpass'))
self.assertFailure(firstLogin, UnauthorizedLogin)
self.assertFailure(secondLogin, UnauthorizedLogin)
d = gatherResults([firstLogin, secondLogin])
def cleanup(ignore):
errors = self.flushLoggedErrors(UnauthorizedLogin)
self.assertEquals(len(errors), 2)
return self._disconnect(None, factory)
d.addCallback(cleanup)
connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
self.addCleanup(connector.disconnect)
return d
    def test_anonymousLogin(self):
        """
        Verify that a PB server using a portal configured with a checker which
        allows IAnonymous credentials can be logged into using IAnonymous
        credentials.
        """
        self.portal.registerChecker(checkers.AllowAnonymousAccess())
        factory = pb.PBClientFactory()
        d = factory.login(credentials.Anonymous(), "BRAINS!")
        def cbLoggedIn(perspective):
            # The anonymous avatar must still expose remote methods.
            return perspective.callRemote('echo', 123)
        d.addCallback(cbLoggedIn)
        d.addCallback(self.assertEqual, 123)
        d.addCallback(self._disconnect, factory)
        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
def test_anonymousLoginNotPermitted(self):
"""
Verify that without an anonymous checker set up, anonymous login is
rejected.
"""
self.portal.registerChecker(
checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
factory = pb.PBClientFactory()
d = factory.login(credentials.Anonymous(), "BRAINS!")
self.assertFailure(d, UnhandledCredentials)
def cleanup(ignore):
errors = self.flushLoggedErrors(UnhandledCredentials)
self.assertEquals(len(errors), 1)
return self._disconnect(None, factory)
d.addCallback(cleanup)
connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
self.addCleanup(connector.disconnect)
return d
    def test_anonymousLoginWithMultipleCheckers(self):
        """
        Like L{test_anonymousLogin} but against a portal with a checker for
        both IAnonymous and IUsernamePassword.
        """
        self.portal.registerChecker(checkers.AllowAnonymousAccess())
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        d = factory.login(credentials.Anonymous(), "BRAINS!")
        def cbLogin(perspective):
            # Anonymous login must keep working when other checkers are also
            # registered on the portal.
            return perspective.callRemote('echo', 123)
        d.addCallback(cbLogin)
        d.addCallback(self.assertEqual, 123)
        d.addCallback(self._disconnect, factory)
        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
    def test_authenticatedLoginWithMultipleCheckers(self):
        """
        Like L{test_anonymousLoginWithMultipleCheckers} but check that
        username/password authentication works.
        """
        self.portal.registerChecker(checkers.AllowAnonymousAccess())
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        d = factory.login(
            credentials.UsernamePassword('user', 'pass'), "BRAINS!")
        def cbLogin(perspective):
            # Authenticated avatars expose 'add'; 100 + 23 == 123 below.
            return perspective.callRemote('add', 100, 23)
        d.addCallback(cbLogin)
        d.addCallback(self.assertEqual, 123)
        d.addCallback(self._disconnect, factory)
        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
    def test_view(self):
        """
        Verify that a viewpoint can be retrieved after authenticating with
        cred.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        d = factory.login(
            credentials.UsernamePassword("user", "pass"), "BRAINS!")
        def cbLogin(perspective):
            # Ask the avatar for a viewpoint object.
            return perspective.callRemote("getViewPoint")
        d.addCallback(cbLogin)
        def cbView(viewpoint):
            # Exercise a remote call through the retrieved viewpoint; the
            # server-side 'check' is expected to fire with a true value.
            return viewpoint.callRemote("check")
        d.addCallback(cbView)
        d.addCallback(self.assertTrue)
        d.addCallback(self._disconnect, factory)
        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
class NonSubclassingPerspective:
    """
    An L{IPerspective} implementation which does not subclass L{pb.Avatar},
    used to verify that avatars only need to provide the interface, not
    inherit from the convenience base class.
    """
    implements(pb.IPerspective)

    def __init__(self, avatarId):
        pass

    # IPerspective implementation
    def perspectiveMessageReceived(self, broker, message, args, kwargs):
        # Echo the (message, args, kwargs) triple back to the caller so a
        # test can verify that arguments survive the round trip.
        args = broker.unserialize(args, self)
        kwargs = broker.unserialize(kwargs, self)
        return broker.serialize((message, args, kwargs))

    # Methods required by TestRealm
    def logout(self):
        self.loggedOut = True
class NSPTestCase(unittest.TestCase):
    """
    Tests for authentication against a realm where the L{IPerspective}
    implementation is not a subclass of L{Avatar}.
    """
    def setUp(self):
        # Serve a portal whose realm produces NonSubclassingPerspective
        # avatars on an ephemeral TCP port.
        self.realm = TestRealm()
        self.realm.perspectiveFactory = NonSubclassingPerspective
        self.portal = portal.Portal(self.realm)
        self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        self.checker.addUser("user", "pass")
        self.portal.registerChecker(self.checker)
        self.factory = WrappingFactory(pb.PBServerFactory(self.portal))
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.addCleanup(self.port.stopListening)
        self.portno = self.port.getHost().port

    def test_NSP(self):
        """
        An L{IPerspective} implementation which does not subclass
        L{Avatar} can expose remote methods for the client to call.
        """
        factory = pb.PBClientFactory()
        d = factory.login(credentials.UsernamePassword('user', 'pass'),
                          "BRAINS!")
        reactor.connectTCP('127.0.0.1', self.portno, factory)
        d.addCallback(lambda p: p.callRemote('ANYTHING', 'here', bar='baz'))
        # assertEqual rather than the deprecated assertEquals alias, for
        # consistency with the rest of this module.
        d.addCallback(self.assertEqual,
                      ('ANYTHING', ('here',), {'bar': 'baz'}))
        def cleanup(ignored):
            factory.disconnect()
            for p in self.factory.protocols:
                p.transport.loseConnection()
        d.addCallback(cleanup)
        return d
class IForwarded(Interface):
    """
    Interface used for testing L{util.LocalAsyncForwarder}.

    Only the methods declared here should be reachable through a forwarder
    built for this interface.
    """
    def forwardMe():
        """
        Simple synchronous method.
        """

    def forwardDeferred():
        """
        Simple asynchronous method.
        """
class Forwarded:
    """
    Test implementation of L{IForwarded}.

    @ivar forwarded: set if C{forwardMe} is called.
    @type forwarded: C{bool}
    @ivar unforwarded: set if C{dontForwardMe} is called.
    @type unforwarded: C{bool}
    """
    implements(IForwarded)
    # Both flags default to False at the class level; a method call flips the
    # corresponding flag on the instance.
    forwarded = False
    unforwarded = False

    def forwardMe(self):
        """
        Set a local flag to test afterwards.
        """
        self.forwarded = True

    def dontForwardMe(self):
        """
        Set a local flag to test afterwards. This should not be called as it's
        not in the interface.
        """
        self.unforwarded = True

    def forwardDeferred(self):
        """
        Asynchronously return C{True}.
        """
        return succeed(True)
class SpreadUtilTestCase(unittest.TestCase):
    """
    Tests for L{twisted.spread.util}.
    """

    def test_sync(self):
        """
        Call a synchronous method of a L{util.LocalAsRemote} object and check
        the result.
        """
        o = LocalRemoteTest()
        # assertEqual rather than the deprecated assertEquals alias.
        self.assertEqual(o.callRemote("add1", 2), 3)

    def test_async(self):
        """
        Call an asynchronous method of a L{util.LocalAsRemote} object and check
        the result.
        """
        # A single instance suffices (a duplicated instantiation was removed).
        o = LocalRemoteTest()
        d = o.callRemote("add", 2, y=4)
        self.assertIsInstance(d, Deferred)
        d.addCallback(self.assertEqual, 6)
        return d

    def test_asyncFail(self):
        """
        Test an asynchronous failure on a remote method call.
        """
        o = LocalRemoteTest()
        d = o.callRemote("fail")
        def eb(f):
            # The errback must receive a Failure wrapping the RuntimeError.
            self.assertIsInstance(f, failure.Failure)
            f.trap(RuntimeError)
        d.addCallbacks(lambda res: self.fail("supposed to fail"), eb)
        return d

    def test_remoteMethod(self):
        """
        Test the C{remoteMethod} facility of L{util.LocalAsRemote}.
        """
        o = LocalRemoteTest()
        m = o.remoteMethod("add1")
        self.assertEqual(m(3), 4)

    def test_localAsyncForwarder(self):
        """
        Test a call to L{util.LocalAsyncForwarder} using L{Forwarded} local
        object.
        """
        f = Forwarded()
        lf = util.LocalAsyncForwarder(f, IForwarded)
        lf.callRemote("forwardMe")
        self.assertTrue(f.forwarded)
        # dontForwardMe is not part of IForwarded, so it must not be invoked.
        lf.callRemote("dontForwardMe")
        self.assertFalse(f.unforwarded)
        rr = lf.callRemote("forwardDeferred")
        l = []
        rr.addCallback(l.append)
        # forwardDeferred fires with True (== 1).
        self.assertEqual(l[0], 1)
class PBWithSecurityOptionsTest(unittest.TestCase):
    """
    Test security customization.
    """

    def test_clientDefaultSecurityOptions(self):
        """
        By default, client broker should use C{jelly.globalSecurity} as
        security settings.
        """
        factory = pb.PBClientFactory()
        broker = factory.buildProtocol(None)
        self.assertIdentical(broker.security, jelly.globalSecurity)

    def test_serverDefaultSecurityOptions(self):
        """
        By default, server broker should use C{jelly.globalSecurity} as
        security settings.
        """
        factory = pb.PBServerFactory(Echoer())
        broker = factory.buildProtocol(None)
        self.assertIdentical(broker.security, jelly.globalSecurity)

    def test_clientSecurityCustomization(self):
        """
        Check that the security settings are passed from the client factory to
        the broker object.
        """
        security = jelly.SecurityOptions()
        factory = pb.PBClientFactory(security=security)
        broker = factory.buildProtocol(None)
        # Identity (not equality): the very same options object must be used.
        self.assertIdentical(broker.security, security)

    def test_serverSecurityCustomization(self):
        """
        Check that the security settings are passed from the server factory to
        the broker object.
        """
        security = jelly.SecurityOptions()
        factory = pb.PBServerFactory(Echoer(), security=security)
        broker = factory.buildProtocol(None)
        self.assertIdentical(broker.security, security)
class DeprecationTests(unittest.TestCase):
    """
    Tests for certain deprecations of free-functions in L{twisted.spread.pb}.
    """

    def test_noOperationDeprecated(self):
        """
        L{pb.noOperation} is deprecated.
        """
        # callDeprecated both invokes the function and asserts that the
        # matching deprecation warning (since Twisted 8.2.0) is emitted.
        self.callDeprecated(
            Version("twisted", 8, 2, 0),
            pb.noOperation, 1, 2, x=3, y=4)

    def test_printTraceback(self):
        """
        L{pb.printTraceback} is deprecated.
        """
        self.callDeprecated(
            Version("twisted", 8, 2, 0),
            pb.printTraceback,
            "printTraceback deprecation fake traceback value")
| apache-2.0 |
romain-dartigues/ansible | lib/ansible/modules/network/avi/avi_poolgroupdeploymentpolicy.py | 31 | 5876 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_poolgroupdeploymentpolicy
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of PoolGroupDeploymentPolicy Avi RESTful Object
description:
- This module is used to configure PoolGroupDeploymentPolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
auto_disable_old_prod_pools:
description:
- It will automatically disable old production pools once there is a new production candidate.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
cloud_ref:
description:
- It is a reference to an object of type cloud.
description:
description:
- User defined description for the object.
evaluation_duration:
description:
- Duration of evaluation period for automatic deployment.
- Allowed values are 60-86400.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
name:
description:
- The name of the pool group deployment policy.
required: true
rules:
description:
- List of pgdeploymentrule.
scheme:
description:
- Deployment scheme.
- Enum options - BLUE_GREEN, CANARY.
- Default value when not specified in API or module is interpreted by Avi Controller as BLUE_GREEN.
target_test_traffic_ratio:
description:
- Target traffic ratio before pool is made production.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
- Units(RATIO).
tenant_ref:
description:
- It is a reference to an object of type tenant.
test_traffic_ratio_rampup:
description:
- Ratio of the traffic that is sent to the pool under test.
- Test ratio of 100 means blue green.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the pool group deployment policy.
webhook_ref:
description:
- Webhook configured with url that avi controller will pass back information about pool group, old and new pool information and current deployment
- rule results.
- It is a reference to an object of type webhook.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PoolGroupDeploymentPolicy object
avi_poolgroupdeploymentpolicy:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_poolgroupdeploymentpolicy
"""
RETURN = '''
obj:
description: PoolGroupDeploymentPolicy (api/poolgroupdeploymentpolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible module entry point: build the argument spec for the
    poolgroupdeploymentpolicy object and delegate to the shared Avi API
    handler."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        auto_disable_old_prod_pools=dict(type='bool',),
        cloud_ref=dict(type='str',),
        description=dict(type='str',),
        evaluation_duration=dict(type='int',),
        name=dict(type='str', required=True),
        rules=dict(type='list',),
        scheme=dict(type='str',),
        target_test_traffic_ratio=dict(type='int',),
        tenant_ref=dict(type='str',),
        test_traffic_ratio_rampup=dict(type='int',),
        url=dict(type='str',),
        uuid=dict(type='str',),
        webhook_ref=dict(type='str',),
    )
    # Add the connection/authentication options shared by all Avi modules.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Perform the create/update/delete against the poolgroupdeploymentpolicy
    # endpoint; the empty set means no fields need special sensitive handling.
    return avi_ansible_api(module, 'poolgroupdeploymentpolicy',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
candy7393/VTK | Wrapping/Python/vtk/tk/vtkLoadPythonTkWidgets.py | 6 | 2873 | import sys, os, string
import vtkCommonCorePython
def vtkLoadPythonTkWidgets(interp):
    """vtkLoadPythonTkWidgets(interp) -- load vtk-tk widget extensions

    This is a mess of mixed python and tcl code that searches for the
    shared object file that contains the python-vtk-tk widgets.  Both
    the python path and the tcl path are searched.
    """
    X = vtkCommonCorePython.vtkVersion.GetVTKMajorVersion()
    Y = vtkCommonCorePython.vtkVersion.GetVTKMinorVersion()
    modname = 'vtkRenderingPythonTkWidgets'
    name = '%s-%d.%d' % (modname, X, Y)
    # Tcl package names are capitalized; str methods replace the deprecated
    # string-module functions (string.capitalize/string.lower).
    pkgname = modname.lower().capitalize()

    # find out if the module is already loaded
    loadedpkgs = interp.call('info', 'loaded')
    found = False
    try:
        # check for result returned as a string
        found = (loadedpkgs.find(pkgname) >= 0)
    except AttributeError:
        # check for result returned as nested tuples
        for pkgtuple in loadedpkgs:
            found |= (pkgname in pkgtuple)
    if found:
        return

    # create the platform-dependent file name
    prefix = ''
    if sys.platform == 'cygwin':
        prefix = 'cyg'
    elif os.name == 'posix':
        prefix = 'lib'
    extension = interp.call('info', 'sharedlibextension')
    filename = prefix + name + extension

    # create an extensive list of paths to search
    pathlist = sys.path
    # add tcl paths, ensure that {} is handled properly
    try:
        auto_paths = interp.getvar('auto_path').split()
    except AttributeError:
        auto_paths = interp.getvar('auto_path')
    for path in auto_paths:
        prev = str(pathlist[-1])
        try:
            # try block needed when one uses Gordon McMillan's Python
            # Installer.
            if len(prev) > 0 and prev[0] == '{' and prev[-1] != '}':
                # previous entry opened a brace-quoted (space-containing)
                # path; keep appending pieces until the brace is closed
                pathlist[-1] = prev + ' ' + path
            else:
                pathlist.append(path)
        except AttributeError:
            pass

    # a common place for these sorts of things
    if os.name == 'posix':
        pathlist.append('/usr/local/lib')

    # attempt to load
    for path in pathlist:
        try:
            # If the path object is not str, it means that it is a
            # Tkinter path object.
            if (not isinstance(path, str) and not isinstance(path, unicode)):
                path = path.string
            # try block needed when one uses Gordon McMillan's Python
            # Installer.
            if len(path) > 0 and path[0] == '{' and path[-1] == '}':
                path = path[1:-1]
            fullpath = os.path.join(path, filename)
        except AttributeError:
            # BUG FIX: previously this fell through and then used `fullpath`,
            # which was either undefined on the first iteration (NameError)
            # or stale from the previous iteration; skip this entry instead.
            continue
        if ' ' in fullpath:
            fullpath = '{' + fullpath + '}'
        if interp.eval('catch {load ' + fullpath + ' ' + pkgname + '}') == '0':
            return

    # re-generate the error
    interp.call('load', filename, pkgname)
| bsd-3-clause |
mercycorps/TolaActivity | tola/test/utils.py | 1 | 10079 | import time
import datetime
from decimal import Decimal, ROUND_HALF_UP
from contextlib import contextmanager
from factory import Sequence
from django.utils import timezone, translation
from django.core.exceptions import ImproperlyConfigured
from django.test import runner
from unittest import runner as ut_runner
from factories.indicators_models import IndicatorFactory, PeriodicTargetFactory, ResultFactory
from factories.workflow_models import ProgramFactory, CountryFactory
from indicators.views.views_indicators import generate_periodic_targets
from indicators.models import Indicator, PeriodicTarget
from workflow.models import Program
class PeriodicTargetValues(object):
    """
    Plain value object describing a single periodic target: the target
    number plus the results and evidence recorded against it.
    """

    def __init__(self, target=0, results=None, evidence=None):
        # `or []` mirrors the historical behavior: any falsy value passed
        # for results/evidence is normalized to a fresh empty list.
        self.target = target
        self.results = results or []
        self.evidence = evidence or []

    @property
    def result_sum(self):
        """Total of every result recorded against this periodic target."""
        total = 0
        for recorded in self.results:
            total += recorded
        return total
class IndicatorValues(object):
    """
    Value object describing an indicator: its configuration flags plus the
    list of PeriodicTargetValues recorded against it.  The properties compute
    the sums and ratios a test would expect to see reported.
    """
    def __init__(
            self,
            periodic_targets,
            is_cumulative=False,
            direction_of_change=Indicator.DIRECTION_OF_CHANGE_NONE,
            target_frequency=Indicator.ANNUAL,
            lop_target=0,
            unit_of_measure_type=Indicator.NUMBER):
        # periodic_targets: list of PeriodicTargetValues, in period order.
        self.periodic_targets = periodic_targets
        self.is_cumulative = is_cumulative
        self.direction_of_change = direction_of_change
        self.target_frequency = target_frequency
        self.lop_target = lop_target
        self.unit_of_measure_type = unit_of_measure_type

    @property
    def result_sum(self):
        # For program total or program-to-date, calculation is same if indicator is cumulative or not.
        if self.unit_of_measure_type == Indicator.NUMBER:
            return sum([pt_values.result_sum for pt_values in self.periodic_targets])
        else:
            # Non-NUMBER (e.g. percentage) indicators use the most recent
            # recorded value as the total.
            return self.periodic_targets[-1].results[-1]

    @property
    def result_sets(self):
        """Results per period, each value rounded to two decimal places."""
        result_values_sets = []
        for pt_values in self.periodic_targets:
            result_values_sets.append([decimalize(cd_value) for cd_value in pt_values.results])
        return result_values_sets

    @property
    def periodic_target_targets(self):
        """Target value per period, rounded to two decimal places."""
        return [decimalize(pt.target) for pt in self.periodic_targets]

    @property
    def result_sum_by_periodic_target(self):
        result_sums = []
        for i, pt_values in enumerate(self.periodic_targets):
            if self.unit_of_measure_type == Indicator.NUMBER:
                if self.is_cumulative:
                    # Sum a list of lists. These are lists of results "to-date".
                    result_sums.append(sum([sum(vals) for vals in self.result_sets[:i+1]]))
                else:
                    result_sums.append(sum(self.result_sets[i]))
            else:
                result_sums.append(self.result_sets[i][-1])
        return result_sums

    def program_to_date_achieved_ratio(self, period_ceiling=None):
        """
        Ratio of achieved to target over the first C{period_ceiling} periods
        (all periods when None).
        """
        achieved_by_period = self.result_sum_by_periodic_target[:period_ceiling]
        targets_by_period = self.periodic_target_targets[:period_ceiling]
        if self.unit_of_measure_type == Indicator.NUMBER:
            achieved_val = sum(achieved_by_period)
            if self.is_cumulative:
                # BUG FIX: was `targets_by_period[:-1]` (a list slice), which
                # made the division below raise TypeError.  For a cumulative
                # indicator the target-to-date is the most recent period's
                # (cumulative) target.
                target_val = targets_by_period[-1]
            else:
                target_val = sum(targets_by_period)
        else:
            # BUG FIX: both operands were `[:-1]` list slices; use the most
            # recent period's values for non-NUMBER indicators, matching how
            # result_sum and result_sum_by_periodic_target treat them.
            achieved_val = achieved_by_period[-1]
            target_val = targets_by_period[-1]
        return achieved_val / target_val

    def __unicode__(self):
        return 'Indicator with %s periodic targets' % (len(self.periodic_targets))

    def __str__(self):
        return unicode(self).encode('utf-8')
class Scenario(object):
    """
    Container for the set of IndicatorValues making up one test scenario.
    """

    def __init__(self, **kwargs):
        # Tolerate a missing (or falsy) 'indicators' kwarg instead of raising
        # KeyError as the original subscript access did; any previously
        # working call is unaffected.
        self.indicators = kwargs.get('indicators') or []

    def percent_indicators_on_target(self):
        # TODO: placeholder value carried over from the original; presumably
        # this should be computed from self.indicators — confirm intent.
        return 3
# Load scenario values into the database
def instantiate_scenario(program_id, scenario, existing_indicator_ids=None):
    """
    Write the in-memory scenario to the database for the given program.

    Creates (or reuses, when existing_indicator_ids is given) one Indicator
    per scenario indicator, fills in its periodic targets and results, and
    returns the indicator ids in creation order — preserving the positional
    link between scenario indicators and their database counterparts.
    """
    # NOTE(review): the "instatiate" typo below is in a runtime error string
    # and is left untouched here.
    if existing_indicator_ids and len(scenario.indicators) != len(existing_indicator_ids):
        raise ImproperlyConfigured(
            "Can't instatiate scenario, indicator count (%s) doesn't match scenario indicator count (%s)" %
            (len(existing_indicator_ids), len(scenario.indicators)))
    indicator_ids = []
    program = Program.objects.get(id=program_id)
    for n, indicator_value_set in enumerate(scenario.indicators):
        if existing_indicator_ids:
            indicator = Indicator.objects.get(id=existing_indicator_ids[n])
        else:
            indicator = IndicatorFactory(
                program=program,
                is_cumulative=indicator_value_set.is_cumulative,
                unit_of_measure_type=indicator_value_set.unit_of_measure_type,
                direction_of_change=indicator_value_set.direction_of_change,
                target_frequency=indicator_value_set.target_frequency,
                lop_target=indicator_value_set.lop_target,)
        # It's important to return indicator_ids in order. Preserving the creation order also preserves
        # the link between the scenario and the indicator that was created in the database.
        indicator_ids.append(indicator.id)
        make_targets(program, indicator)
        periodic_targets = PeriodicTarget.objects.filter(indicator__id=indicator.id)
        if len(periodic_targets) != len(indicator_value_set.periodic_targets):
            raise ImproperlyConfigured(
                "Scenario's periodic target count (%s) doesn't match program-based periodic target count (%s)." %
                (len(indicator_value_set.periodic_targets), len(periodic_targets)))
        for i, pt in enumerate(periodic_targets):
            # Overwrite the factory-generated target with the scenario value.
            pt.target = indicator_value_set.periodic_targets[i].target
            pt.save()
            try:
                evidence_values = indicator_value_set.periodic_targets[i].evidence
            except KeyError:
                # NOTE(review): attribute access raises AttributeError, not
                # KeyError, so this fallback looks unreachable — confirm.
                evidence_values = []
            for j, res_value in enumerate(indicator_value_set.periodic_targets[i].results):
                # Spread results across consecutive days starting at the
                # period start (or the program start when no period start).
                dc = pt.start_date
                if dc is None:
                    dc = pt.indicator.program.reporting_period_start
                dc = dc + datetime.timedelta(days=j)
                res = ResultFactory(
                    periodic_target=pt, indicator=indicator, program=program, achieved=res_value, date_collected=dc)
                if evidence_values and evidence_values[j]:
                    res.evidence_url = 'http://www.example.com'
                    res.record_name = 'evidence name'
                    res.save()
    return indicator_ids
# Generate anonymous indicators and programs
def generate_core_indicator_data(c_params=None, p_count=3, i_count=4):
    """
    Create countries with related programs and indicators.

    :param c_params: list of (country_name, country_code) tuples; defaults to
        Colombia and Tunisia.
    :param p_count: number of programs created per country.
    :param i_count: number of indicators created per program.
    :return: (program_ids, indicator_ids) covering everything created.
    """
    if c_params is None:
        c_params = [
            ('Colombia', 'CO'),
            ('Tunisia', 'TN'),
        ]
    program_ids = []
    indicator_ids = []
    for i in range(len(c_params)):
        country = CountryFactory(country=c_params[i][0], code=c_params[i][1])
        programs = ProgramFactory.create_batch(
            p_count, countries=[country], name=Sequence(lambda n: 'Program %s %s' % (country.code, n)),
            funding_status="Funded",
        )
        for p in programs:
            program_ids.append(p.id)
            indicators = IndicatorFactory.create_batch(
                i_count, program=p, unit_of_measure_type=Indicator.NUMBER, is_cumulative=False,
                direction_of_change=Indicator.DIRECTION_OF_CHANGE_NONE, target_frequency=Indicator.ANNUAL)
            # BUG FIX: this was a plain assignment ("indicator_ids = [...]"),
            # so only the last program's indicator ids survived the loops;
            # accumulate instead.  The loop variable is renamed to `ind` so a
            # Python 2 list comprehension cannot clobber the outer `i`.
            indicator_ids.extend([ind.id for ind in indicators])
            p.indicator_set.add(*indicators)
    return program_ids, indicator_ids
def make_targets(program, indicator):
    """
    Create PeriodicTarget rows for every period implied by the indicator's
    target frequency across the program's reporting period.
    """
    # One target per period produced by the program for this frequency.
    num_periods = len([x for x in program.get_periods_for_frequency(indicator.target_frequency)])
    targets_json = generate_periodic_targets(
        tf=indicator.target_frequency, start_date=program.reporting_period_start, numTargets=num_periods)
    for i, pt in enumerate(targets_json):
        # customsort preserves the chronological ordering of the targets.
        PeriodicTargetFactory(
            indicator=indicator,
            customsort=i,
            start_date=pt['start_date'],
            end_date=pt['end_date'],
            edit_date=timezone.now())
def decimalize(number):
    """Return *number* as a Decimal rounded half-up to two decimal places."""
    two_places = Decimal('.01')
    return Decimal(number).quantize(two_places, rounding=ROUND_HALF_UP)
class TimedTestResult(ut_runner.TextTestResult):
    """
    A TextTestResult that records the description of every passing test
    taking longer than SLOW_TEST_THRESHOLD seconds.
    """
    SLOW_TEST_THRESHOLD = 2

    def __init__(self, *args, **kwargs):
        super(TimedTestResult, self).__init__(*args, **kwargs)
        self.test_timings = []

    def startTest(self, test):
        # Stamp the wall-clock start time before delegating upward.
        self._started_at = time.time()
        super(TimedTestResult, self).startTest(test)

    def addSuccess(self, test):
        duration = time.time() - self._started_at
        if duration > self.SLOW_TEST_THRESHOLD:
            self.test_timings.append(
                "{} ({:.03}s)".format(self.getDescription(test), duration))
        super(TimedTestResult, self).addSuccess(test)

    def slowTests(self):
        """Return True when at least one slow test was recorded."""
        return bool(self.test_timings)

    def getTestTimings(self):
        """Return the recorded slow-test description strings."""
        return self.test_timings
class TimedTestRunner(runner.DiscoverRunner):
    """
    Django test runner that reports any tests exceeding the slow-test
    threshold once the suite has finished.
    """
    def get_resultclass(self):
        # Substitute the timing-aware result class for the default one.
        return TimedTestResult

    def run_suite(self, suite, **kwargs):
        result = super(TimedTestRunner, self).run_suite(suite, **kwargs)
        if result.slowTests():
            # Print the slow-test summary after the normal suite output.
            result.stream.writeln(
                "Slow Tests (greater than {0}s):".format(result.SLOW_TEST_THRESHOLD)
            )
            for msg in result.getTestTimings():
                result.stream.writeln(msg)
        return result
SPECIAL_CHARS = 'Spécîal Chårs 1234 !@#$; DROP TABLE indicators.indicator;'
@contextmanager
def lang_context(lang):
try:
translation.activate(lang)
yield
finally:
translation.activate('en') | apache-2.0 |
amydu703/quipsample | node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/eclipse.py | 1825 | 17014 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!), so we convert them to variables
generator_default_variables[dirname] = '$' + dirname
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
  """Seed default_variables from generator_flags and the detected flavor."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Eclipse generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    # Flip the module-level flag that gyp's input machinery reads.
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params,
                             compiler_path):
  """Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """

  gyp_includes_set = set()
  compiler_includes_list = []

  # Find compiler's default include dirs.
  if compiler_path:
    command = shlex.split(compiler_path)
    command.extend(['-E', '-xc++', '-v', '-'])
    # The search-path listing is printed on stderr, hence communicate()[1].
    proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[1]
    # Extract the list of include dirs from the output, which has this format:
    #   ...
    #   #include "..." search starts here:
    #   #include <...> search starts here:
    #    /usr/include/c++/4.6
    #    /usr/local/include
    #   End of search list.
    #   ...
    in_include_list = False
    for line in output.splitlines():
      if line.startswith('#include'):
        in_include_list = True
        continue
      if line.startswith('End of search list.'):
        break
      if in_include_list:
        include_dir = line.strip()
        if include_dir not in compiler_includes_list:
          compiler_includes_list.append(include_dir)

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    # generator_flags is only needed (and only defined) on the 'win' path;
    # the matching flavor check below guards its use.
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]

      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
      # remove this.
      if flavor == 'win':
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          if include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)

      # Find standard gyp include dirs.
      if config.has_key('include_dirs'):
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            # Expand the generated-headers placeholder for each candidate
            # shared intermediate dir.
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            if not os.path.isabs(include_dir):
              base_dir = os.path.dirname(target_name)

              include_dir = base_dir + '/' + include_dir
              include_dir = os.path.abspath(include_dir)

            gyp_includes_set.add(include_dir)

  # Generate a list that has all the include dirs.
  all_includes_list = list(gyp_includes_set)
  all_includes_list.sort()
  for compiler_include in compiler_includes_list:
    if not compiler_include in gyp_includes_set:
      all_includes_list.append(compiler_include)

  # All done.
  return all_includes_list
def GetCompilerPath(target_list, data, options):
  """Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that. Otherwise, see if a compiler was specified via the
    CC_target environment variable.
  """
  # Prefer a compiler configured via make_global_settings in the gyp files.
  # make_global_settings is a sequence of (key, value) pairs.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings = data[build_file].get('make_global_settings', {})
  for key, value in make_global_settings:
    if key in ('CC', 'CXX'):
      return os.path.join(options.toplevel_dir, value)
  # Otherwise fall back to compiler-related environment variables.
  for env_var in ('CC_target', 'CC', 'CXX'):
    compiler = os.environ.get(env_var)
    if compiler:
      return compiler
  # Last resort: assume gcc is available on the PATH.
  return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
                  compiler_path):
  """Calculate the defines for a project.

  Returns:
    A dict that includes explicit defines declared in gyp files along with
    all of the default defines that the compiler uses.
  """
  # Get defines declared in the gyp files.
  all_defines = {}
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if flavor == 'win':
      # On Windows the defines come from the emulated MSVS settings.
      msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
      extra_defines = msvs_settings.GetComputedDefines(config_name)
    else:
      extra_defines = []
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      target_defines = config['defines']
    else:
      target_defines = []
    for define in target_defines + extra_defines:
      # Each entry is "NAME=VALUE" or a bare "NAME" (implicitly "1").
      split_define = define.split('=', 1)
      if len(split_define) == 1:
        split_define.append('1')
      if split_define[0].strip() in all_defines:
        # Already defined
        continue
      all_defines[split_define[0].strip()] = split_define[1].strip()
  # Get default compiler defines (if possible).
  if flavor == 'win':
    return all_defines # Default defines already processed in the loop above.
  if compiler_path:
    # Ask the preprocessor to dump its built-in macros: "-E -dM" on an empty
    # stdin prints one "#define NAME VALUE" line per macro.
    command = shlex.split(compiler_path)
    command.extend(['-E', '-dM', '-'])
    cpp_proc = subprocess.Popen(args=command, cwd='.',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # NOTE(review): assumes the output is text (Python 2 str); under Python 3
    # this would be bytes and the parsing below would need decoding — confirm.
    cpp_output = cpp_proc.communicate()[0]
    cpp_lines = cpp_output.split('\n')
    for cpp_line in cpp_lines:
      if not cpp_line.strip():
        continue
      # Split into ['#define', NAME, VALUE]; VALUE may itself contain spaces.
      cpp_line_parts = cpp_line.split(' ', 2)
      key = cpp_line_parts[1]
      if len(cpp_line_parts) >= 3:
        val = cpp_line_parts[2]
      else:
        val = '1'
      all_defines[key] = val
  return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
  """Emit the include-paths section of a CDT settings export file."""
  section_name = ('org.eclipse.cdt.internal.ui.wizards.'
                  'settingswizards.IncludePaths')
  out.write('  <section name="%s">\n' % section_name)
  out.write('    <language name="holder for library settings"></language>\n')
  # Repeat the full include list once per Eclipse language.
  for language in eclipse_langs:
    out.write('    <language name="%s">\n' % language)
    for path in include_dirs:
      out.write('      <includepath workspace_path="false">%s</includepath>\n'
                % path)
    out.write('    </language>\n')
  out.write('  </section>\n')
def WriteMacros(out, eclipse_langs, defines):
  """Write the macros (preprocessor defines) section of a CDT settings
  export file.

  Args:
    out: writable file-like object receiving the XML fragment.
    eclipse_langs: Eclipse language names to repeat the macro list under.
    defines: dict mapping macro name -> value; both are XML-escaped.
  """
  out.write('  <section name="org.eclipse.cdt.internal.ui.wizards.' \
            'settingswizards.Macros">\n')
  out.write('    <language name="holder for library settings"></language>\n')
  for lang in eclipse_langs:
    out.write('    <language name="%s">\n' % lang)
    # sorted(defines) keeps the output deterministic and, unlike the previous
    # defines.iterkeys(), works on both Python 2 and Python 3 dicts.
    for key in sorted(defines):
      out.write('      <macro><name>%s</name><value>%s</value></macro>\n' %
                (escape(key), escape(defines[key])))
    out.write('    </language>\n')
  out.write('  </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Write the CDT settings and Java classpath files for one configuration."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  # Output files live under e.g. "out/Debug", relative to the source root.
  rel_build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
                               config_name)
  toplevel_build = os.path.join(options.toplevel_dir, rel_build_dir)
  # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
  # SHARED_INTERMEDIATE_DIR. Include both possible locations.
  shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
                              os.path.join(toplevel_build, 'gen')]
  cdt_settings_path = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
  GenerateCdtSettingsFile(target_list,
                          target_dicts,
                          data,
                          params,
                          config_name,
                          cdt_settings_path,
                          options,
                          shared_intermediate_dirs)
  classpath_path = os.path.join(toplevel_build, 'eclipse-classpath.xml')
  GenerateClasspathFile(target_list,
                        target_dicts,
                        options.toplevel_dir,
                        toplevel_build,
                        classpath_path)
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
                            config_name, out_name, options,
                            shared_intermediate_dirs):
  """Write an Eclipse CDT settings file (include paths and macros) to
  out_name."""
  gyp.common.EnsureDirExists(out_name)
  with open(out_name, 'w') as out:
    out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    out.write('<cdtprojectproperties>\n')
    # Languages for which Eclipse can import settings.
    languages = ['C++ Source File', 'C Source File', 'Assembly Source File',
                 'GNU C++', 'GNU C', 'Assembly']
    compiler_path = GetCompilerPath(target_list, data, options)
    include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
                                            shared_intermediate_dirs,
                                            config_name, params, compiler_path)
    WriteIncludePaths(out, languages, include_dirs)
    defines = GetAllDefines(target_list, target_dicts, data, config_name,
                            params, compiler_path)
    WriteMacros(out, languages, defines)
    out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
                          toplevel_build, out_name):
  '''Generates a classpath file suitable for symbol navigation and code
  completion of Java code (such as in Android projects) by finding all
  .java and .jar files used as action inputs.'''
  gyp.common.EnsureDirExists(out_name)
  result = ET.Element('classpath')
  # Closure: appends <classpathentry kind=... path=.../> children to `result`.
  # Paths are deduplicated (set) and sorted so output is deterministic.
  def AddElements(kind, paths):
    # First, we need to normalize the paths so they are all relative to the
    # toplevel dir.
    rel_paths = set()
    for path in paths:
      if os.path.isabs(path):
        rel_paths.add(os.path.relpath(path, toplevel_dir))
      else:
        rel_paths.add(path)
    for path in sorted(rel_paths):
      entry_element = ET.SubElement(result, 'classpathentry')
      entry_element.set('kind', kind)
      entry_element.set('path', path)
  AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
  AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
  # Include the standard JRE container and a dummy out folder
  AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
  # Include a dummy out folder so that Eclipse doesn't use the default /bin
  # folder in the root of the project.
  AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])
  ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all .jars used as inputs.

  Relative jar paths are joined onto the directory of the gyp target that
  references them; absolute paths and "$"-variable paths are left alone
  (the latter are skipped entirely).'''
  for target_name in target_list:
    actions = target_dicts[target_name].get('actions', [])
    for action in actions:
      for input_path in action['inputs']:
        extension = os.path.splitext(input_path)[1]
        if extension != '.jar' or input_path.startswith('$'):
          continue
        if os.path.isabs(input_path):
          yield input_path
        else:
          yield os.path.join(os.path.dirname(target_name), input_path)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all likely java package root directories.

  For each .java action input, walks up from the file's directory looking
  for a parent named 'src' or 'java' (the canonical package roots in
  Chromium). If none is found before reaching the filesystem root or
  toplevel_dir, the file's own directory is yielded instead.'''
  for target_name in target_list:
    target = target_dicts[target_name]
    for action in target.get('actions', []):
      for input_ in action['inputs']:
        if os.path.splitext(input_)[1] != '.java' or input_.startswith('$'):
          continue
        source_dir = os.path.dirname(
            os.path.join(os.path.dirname(target_name), input_))
        # This will break if 'src' or 'java' exists inside the package
        # structure itself; inspecting the java file for its package name
        # would be more robust if that proves too fragile in practice.
        candidate = source_dir
        while True:
          if os.path.basename(candidate) in ('src', 'java'):
            yield candidate
            break
          candidate, _ = os.path.split(candidate)
          if not candidate or candidate == toplevel_dir:
            # Didn't find a known root; fall back to the original directory.
            yield source_dir
            break
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project."""
  if params['options'].generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")
  # A single config may be requested via the 'config' generator flag;
  # otherwise generate output for every configuration of the first target.
  user_config = params.get('generator_flags', {}).get('config', None)
  if user_config:
    config_names = [user_config]
  else:
    first_target = target_dicts[target_list[0]]
    config_names = first_target['configurations'].keys()
  for config_name in config_names:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name)
| mit |
Mafarricos/Mafarricos-modded-xbmc-addons | plugin.video.xbmctorrentV2/resources/site-packages/bs4/tests/test_tree.py | 292 | 70169 | # -*- coding: utf-8 -*-
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
CData,
Comment,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
    """Shared assertion helpers for tests that select tags from a tree."""

    def assertSelects(self, tags, should_match):
        """Assert that the tags' string contents equal should_match.

        Used by tests that define a bunch of tags, each containing a
        single string, and then select certain strings by some mechanism.
        """
        strings = [tag.string for tag in tags]
        self.assertEqual(strings, should_match)

    def assertSelectsIDs(self, tags, should_match):
        """Assert that the tags' 'id' attribute values equal should_match.

        Used by tests that define a bunch of tags, each carrying an 'id'
        attribute, and then select certain tags by some mechanism.
        """
        ids = [tag['id'] for tag in tags]
        self.assertEqual(ids, should_match)
class TestFind(TreeTest):
    """Basic tests of the find() method.

    find() just calls find_all() with limit=1, so it's not tested all
    that thoroughly here.
    """
    def test_find_tag(self):
        """find() returns the first tag matching the given name."""
        soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
        self.assertEqual(soup.find("b").string, "2")
    def test_unicode_text_find(self):
        """find() can match non-ASCII text nodes."""
        soup = self.soup(u'<h1>Räksmörgås</h1>')
        self.assertEqual(soup.find(text=u'Räksmörgås'), u'Räksmörgås')
    def test_find_everything(self):
        """Test an optimization that finds all tags."""
        soup = self.soup("<a>foo</a><b>bar</b>")
        self.assertEqual(2, len(soup.find_all()))
    def test_find_everything_with_name(self):
        """Test an optimization that finds all tags with a given name."""
        soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>")
        self.assertEqual(2, len(soup.find_all('a')))
class TestFindAll(TreeTest):
    """Basic tests of the find_all() method."""
    def test_find_all_text_nodes(self):
        """You can search the tree for text nodes."""
        soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
        # Exact match.
        self.assertEqual(soup.find_all(text="bar"), [u"bar"])
        # Match any of a number of strings.
        self.assertEqual(
            soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"])
        # Match a regular expression.
        self.assertEqual(soup.find_all(text=re.compile('.*')),
                         [u"Foo", u"bar", u'\xbb'])
        # Match anything.
        self.assertEqual(soup.find_all(text=True),
                         [u"Foo", u"bar", u'\xbb'])
    def test_find_all_limit(self):
        """You can limit the number of items returned by find_all."""
        soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
        self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
        self.assertSelects(soup.find_all('a', limit=1), ["1"])
        self.assertSelects(
            soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])
        # A limit of 0 means no limit.
        self.assertSelects(
            soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])
    def test_calling_a_tag_is_calling_findall(self):
        """Calling a tag (or the soup) like a function invokes find_all()."""
        soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
        self.assertSelects(soup('a', limit=1), ["1"])
        self.assertSelects(soup.b(id="foo"), ["3"])
    def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self):
        soup = self.soup("<a></a>")
        # Create a self-referential list.
        l = []
        l.append(l)
        # Without special code in _normalize_search_value, this would cause infinite
        # recursion.
        self.assertEqual([], soup.find_all(l))
    def test_find_all_resultset(self):
        """All find_all calls return a ResultSet"""
        soup = self.soup("<a></a>")
        result = soup.find_all("a")
        self.assertTrue(hasattr(result, "source"))
        result = soup.find_all(True)
        self.assertTrue(hasattr(result, "source"))
        result = soup.find_all(text="foo")
        self.assertTrue(hasattr(result, "source"))
class TestFindAllBasicNamespaces(TreeTest):
    """Namespaced tag and attribute names work as plain strings in find()."""

    def test_find_by_namespaced_name(self):
        markup = '<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">'
        soup = self.soup(markup)
        # A namespaced tag name is matched as the literal "prefix:name".
        self.assertEqual("4", soup.find("mathml:msqrt").string)
        # Same for a namespaced attribute name passed via attrs.
        self.assertEqual("a", soup.find(attrs={"svg:fill": "red"}).name)
class TestFindAllByName(TreeTest):
    """Test ways of finding tags by tag name."""
    def setUp(self):
        # BUG FIX: this previously called super(TreeTest, self).setUp(),
        # which skips TreeTest in the MRO. That was only harmless because
        # TreeTest defines no setUp of its own; naming this class runs the
        # full chain and survives future refactoring.
        super(TestFindAllByName, self).setUp()
        self.tree = self.soup("""<a>First tag.</a>
                                 <b>Second tag.</b>
                                 <c>Third <a>Nested tag.</a> tag.</c>""")
    def test_find_all_by_tag_name(self):
        # Find all the <a> tags.
        self.assertSelects(
            self.tree.find_all('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_name_and_text(self):
        self.assertSelects(
            self.tree.find_all('a', text='First tag.'), ['First tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=re.compile("tag")),
            ['First tag.', 'Nested tag.'])
    def test_find_all_on_non_root_element(self):
        # You can call find_all on any node, not just the root.
        self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])
    def test_calling_element_invokes_find_all(self):
        self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_strainer(self):
        self.assertSelects(
            self.tree.find_all(SoupStrainer('a')),
            ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_names(self):
        self.assertSelects(
            self.tree.find_all(['a', 'b']),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_dict(self):
        self.assertSelects(
            self.tree.find_all({'a' : True, 'b' : True}),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_re(self):
        self.assertSelects(
            self.tree.find_all(re.compile('^[ab]$')),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_with_tags_matching_method(self):
        # You can define an oracle method that determines whether
        # a tag matches the search.
        def id_matches_name(tag):
            return tag.name == tag.get('id')
        tree = self.soup("""<a id="a">Match 1.</a>
                            <a id="1">Does not match.</a>
                            <b id="b">Match 2.</a>""")
        self.assertSelects(
            tree.find_all(id_matches_name), ["Match 1.", "Match 2."])
class TestFindAllByAttribute(TreeTest):
    """Test ways of finding tags by attribute value."""
    def test_find_all_by_attribute_name(self):
        # You can pass in keyword arguments to find_all to search by
        # attribute.
        tree = self.soup("""
                         <a id="first">Matching a.</a>
                         <a id="second">
                          Non-matching <b id="first">Matching b.</b>a.
                         </a>""")
        self.assertSelects(tree.find_all(id='first'),
                           ["Matching a.", "Matching b."])
    def test_find_all_by_utf8_attribute_value(self):
        """Attribute values can be matched as UTF-8 bytes or as Unicode."""
        peace = u"םולש".encode("utf8")
        data = u'<a title="םולש"></a>'.encode("utf8")
        soup = self.soup(data)
        self.assertEqual([soup.a], soup.find_all(title=peace))
        self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8")))
        self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"]))
    def test_find_all_by_attribute_dict(self):
        # You can pass in a dictionary as the argument 'attrs'. This
        # lets you search for attributes like 'name' (a fixed argument
        # to find_all) and 'class' (a reserved word in Python.)
        tree = self.soup("""
                         <a name="name1" class="class1">Name match.</a>
                         <a name="name2" class="class2">Class match.</a>
                         <a name="name3" class="class3">Non-match.</a>
                         <name1>A tag called 'name1'.</name1>
                         """)
        # This doesn't do what you want.
        self.assertSelects(tree.find_all(name='name1'),
                           ["A tag called 'name1'."])
        # This does what you want.
        self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
                           ["Name match."])
        self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
                           ["Class match."])
    def test_find_all_by_class(self):
        tree = self.soup("""
                         <a class="1">Class 1.</a>
                         <a class="2">Class 2.</a>
                         <b class="1">Class 1.</b>
                         <c class="3 4">Class 3 and 4.</c>
                         """)
        # Passing in the class_ keyword argument will search against
        # the 'class' attribute.
        self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.'])
        self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.'])
        self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.'])
        # Passing in a string to 'attrs' will also search the CSS class.
        self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
        self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
        self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
        self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])
    def test_find_by_class_when_multiple_classes_present(self):
        tree = self.soup("<gar class='foo bar'>Found it</gar>")
        f = tree.find_all("gar", class_=re.compile("o"))
        self.assertSelects(f, ["Found it"])
        f = tree.find_all("gar", class_=re.compile("a"))
        self.assertSelects(f, ["Found it"])
        # Since the class is not the string "foo bar", but the two
        # strings "foo" and "bar", this will not find anything.
        f = tree.find_all("gar", class_=re.compile("o b"))
        self.assertSelects(f, [])
    def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
        """A callable passed as 'attrs' is applied to the CSS class."""
        soup = self.soup("<a class='bar'>Found it</a>")
        self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])
        def big_attribute_value(value):
            return len(value) > 3
        self.assertSelects(soup.find_all("a", big_attribute_value), [])
        def small_attribute_value(value):
            return len(value) <= 3
        self.assertSelects(
            soup.find_all("a", small_attribute_value), ["Found it"])
    def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
        soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
        a, a2 = soup.find_all("a")
        self.assertEqual([a, a2], soup.find_all("a", "foo"))
        self.assertEqual([a], soup.find_all("a", "bar"))
        # If you specify the class as a string that contains a
        # space, only that specific value will be found.
        self.assertEqual([a], soup.find_all("a", class_="foo bar"))
        self.assertEqual([a], soup.find_all("a", "foo bar"))
        self.assertEqual([], soup.find_all("a", "bar foo"))
    def test_find_all_by_attribute_soupstrainer(self):
        tree = self.soup("""
                         <a id="first">Match.</a>
                         <a id="second">Non-match.</a>""")
        strainer = SoupStrainer(attrs={'id' : 'first'})
        self.assertSelects(tree.find_all(strainer), ['Match.'])
    def test_find_all_with_missing_atribute(self):
        # (sic: "atribute" — renaming would change the test id.)
        # You can pass in None as the value of an attribute to find_all.
        # This will match tags that do not have that attribute set.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(tree.find_all('a', id=None), ["No ID present."])
    def test_find_all_with_defined_attribute(self):
        # You can pass in None as the value of an attribute to find_all.
        # This will match tags that have that attribute set to any value.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(
            tree.find_all(id=True), ["ID present.", "ID is empty."])
    def test_find_all_with_numeric_attribute(self):
        # If you search for a number, it's treated as a string.
        tree = self.soup("""<a id=1>Unquoted attribute.</a>
                            <a id="1">Quoted attribute.</a>""")
        expected = ["Unquoted attribute.", "Quoted attribute."]
        self.assertSelects(tree.find_all(id=1), expected)
        self.assertSelects(tree.find_all(id="1"), expected)
    def test_find_all_with_list_attribute_values(self):
        # You can pass a list of attribute values instead of just one,
        # and you'll get tags that match any of the values.
        tree = self.soup("""<a id="1">1</a>
                            <a id="2">2</a>
                            <a id="3">3</a>
                            <a>No ID.</a>""")
        self.assertSelects(tree.find_all(id=["1", "3", "4"]),
                           ["1", "3"])
    def test_find_all_with_regular_expression_attribute_value(self):
        # You can pass a regular expression as an attribute value, and
        # you'll get tags whose values for that attribute match the
        # regular expression.
        tree = self.soup("""<a id="a">One a.</a>
                            <a id="aa">Two as.</a>
                            <a id="ab">Mixed as and bs.</a>
                            <a id="b">One b.</a>
                            <a>No ID.</a>""")
        self.assertSelects(tree.find_all(id=re.compile("^a+$")),
                           ["One a.", "Two as."])
    def test_find_by_name_and_containing_string(self):
        soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
        a = soup.a
        self.assertEqual([a], soup.find_all("a", text="foo"))
        # NOTE(review): the next assertion is duplicated verbatim below.
        self.assertEqual([], soup.find_all("a", text="bar"))
        self.assertEqual([], soup.find_all("a", text="bar"))
    def test_find_by_name_and_containing_string_when_string_is_buried(self):
        soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
        self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))
    def test_find_by_attribute_and_containing_string(self):
        soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
        a = soup.a
        self.assertEqual([a], soup.find_all(id=2, text="foo"))
        self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
    """Test Tag.index"""
    def test_index(self):
        """Tag.index returns the position of a child within its parent."""
        tree = self.soup("""<div>
                            <a>Identical</a>
                            <b>Not identical</b>
                            <a>Identical</a>
                            <c><d>Identical with child</d></c>
                            <b>Also not identical</b>
                            <c><d>Identical with child</d></c>
                            </div>""")
        div = tree.div
        for i, element in enumerate(div.contents):
            self.assertEqual(i, div.index(element))
        # A value that isn't actually one of the children raises ValueError.
        self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
    """Test navigation and searching through an element's parents."""
    def setUp(self):
        super(TestParentOperations, self).setUp()
        # Nested <ul>s; self.start is the innermost <b> tag.
        self.tree = self.soup('''<ul id="empty"></ul>
                                 <ul id="top">
                                  <ul id="middle">
                                   <ul id="bottom">
                                    <b>Start here</b>
                                   </ul>
                                  </ul>''')
        self.start = self.tree.b
    def test_parent(self):
        self.assertEqual(self.start.parent['id'], 'bottom')
        self.assertEqual(self.start.parent.parent['id'], 'middle')
        self.assertEqual(self.start.parent.parent.parent['id'], 'top')
    def test_parent_of_top_tag_is_soup_object(self):
        top_tag = self.tree.contents[0]
        self.assertEqual(top_tag.parent, self.tree)
    def test_soup_object_has_no_parent(self):
        self.assertEqual(None, self.tree.parent)
    def test_find_parents(self):
        # Parents are returned innermost first.
        self.assertSelectsIDs(
            self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
        self.assertSelectsIDs(
            self.start.find_parents('ul', id="middle"), ['middle'])
    def test_find_parent(self):
        self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
        self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top')
    def test_parent_of_text_element(self):
        text = self.tree.find(text="Start here")
        self.assertEqual(text.parent.name, 'b')
    def test_text_element_find_parent(self):
        text = self.tree.find(text="Start here")
        self.assertEqual(text.find_parent('ul')['id'], 'bottom')
    def test_parent_generator(self):
        parents = [parent['id'] for parent in self.start.parents
                   if parent is not None and 'id' in parent.attrs]
        self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
    """Base class providing a small flat document for next/previous-element
    navigation tests."""
    def setUp(self):
        # BUG FIX: this previously called super(TreeTest, self).setUp(),
        # which skips TreeTest in the MRO. That was only harmless because
        # TreeTest defines no setUp; naming this class runs the full chain.
        super(ProximityTest, self).setUp()
        self.tree = self.soup(
            '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
    """Test next_element navigation and the find_next family of methods."""
    def setUp(self):
        super(TestNextOperations, self).setUp()
        self.start = self.tree.b
    def test_next(self):
        self.assertEqual(self.start.next_element, "One")
        self.assertEqual(self.start.next_element.next_element['id'], "2")
    def test_next_of_last_item_is_none(self):
        last = self.tree.find(text="Three")
        self.assertEqual(last.next_element, None)
    def test_next_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        self.assertEqual(self.tree.next_element, None)
    def test_find_all_next(self):
        self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
        self.start.find_all_next(id=3)
        self.assertSelects(self.start.find_all_next(id=3), ["Three"])
    def test_find_next(self):
        self.assertEqual(self.start.find_next('b')['id'], '2')
        self.assertEqual(self.start.find_next(text="Three"), "Three")
    def test_find_next_for_text_element(self):
        # find_next works from a text node, not just from a tag.
        text = self.tree.find(text="One")
        self.assertEqual(text.find_next("b").string, "Two")
        self.assertSelects(text.find_all_next("b"), ["Two", "Three"])
    def test_next_generator(self):
        start = self.tree.find(text="Two")
        successors = [node for node in start.next_elements]
        # There are two successors: the final <b> tag and its text contents.
        tag, contents = successors
        self.assertEqual(tag['id'], '3')
        self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
    """Test previous_element navigation and the find_previous family."""
    def setUp(self):
        super(TestPreviousOperations, self).setUp()
        self.end = self.tree.find(text="Three")
    def test_previous(self):
        self.assertEqual(self.end.previous_element['id'], "3")
        self.assertEqual(self.end.previous_element.previous_element, "Two")
    def test_previous_of_first_item_is_none(self):
        first = self.tree.find('html')
        self.assertEqual(first.previous_element, None)
    def test_previous_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        # XXX This is broken!
        #self.assertEqual(self.tree.previous_element, None)
        pass
    def test_find_all_previous(self):
        # The <b> tag containing the "Three" node is the predecessor
        # of the "Three" node itself, which is why "Three" shows up
        # here.
        self.assertSelects(
            self.end.find_all_previous('b'), ["Three", "Two", "One"])
        self.assertSelects(self.end.find_all_previous(id=1), ["One"])
    def test_find_previous(self):
        self.assertEqual(self.end.find_previous('b')['id'], '3')
        self.assertEqual(self.end.find_previous(text="One"), "One")
    def test_find_previous_for_text_element(self):
        # find_previous works from a text node, not just from a tag.
        text = self.tree.find(text="Three")
        self.assertEqual(text.find_previous("b").string, "Three")
        self.assertSelects(
            text.find_all_previous("b"), ["Three", "Two", "One"])
    def test_previous_generator(self):
        start = self.tree.find(text="One")
        predecessors = [node for node in start.previous_elements]
        # There are four predecessors: the <b> tag containing "One"
        # the <body> tag, the <head> tag, and the <html> tag.
        b, body, head, html = predecessors
        self.assertEqual(b['id'], '1')
        self.assertEqual(body.name, "body")
        self.assertEqual(head.name, "head")
        self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
    """Base class providing a flat document of <span>s (each containing one
    nested <span>) for sibling-navigation tests."""
    def setUp(self):
        super(SiblingTest, self).setUp()
        markup = '''<html>
                    <span id="1">
                    <span id="1.1"></span>
                    </span>
                    <span id="2">
                    <span id="2.1"></span>
                    </span>
                    <span id="3">
                    <span id="3.1"></span>
                    </span>
                    <span id="4"></span>
                    </html>'''
        # All that whitespace looks good but makes the tests more
        # difficult. Get rid of it.
        # BUG FIX: the pattern was the non-raw string "\n\s*"; "\s" is an
        # invalid string escape (DeprecationWarning since Python 3.6, an
        # error in 3.12). The raw string matches identically.
        markup = re.compile(r"\n\s*").sub("", markup)
        self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
    """Test next_sibling navigation and the find_next_sibling family."""
    def setUp(self):
        super(TestNextSibling, self).setUp()
        self.start = self.tree.find(id="1")
    def test_next_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.next_sibling, None)
    def test_next_sibling(self):
        self.assertEqual(self.start.next_sibling['id'], '2')
        self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
        # Note the difference between next_sibling and next_element.
        self.assertEqual(self.start.next_element['id'], '1.1')
    def test_next_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.next_sibling, None)
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.next_sibling, None)
        last_span = self.tree.find(id="4")
        self.assertEqual(last_span.next_sibling, None)
    def test_find_next_sibling(self):
        self.assertEqual(self.start.find_next_sibling('span')['id'], '2')
    def test_next_siblings(self):
        self.assertSelectsIDs(self.start.find_next_siblings("span"),
                              ['2', '3', '4'])
        self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])
    def test_next_sibling_for_text_element(self):
        # Sibling navigation also works from a text node.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="Foo")
        self.assertEqual(start.next_sibling.name, 'b')
        self.assertEqual(start.next_sibling.next_sibling, 'baz')
        self.assertSelects(start.find_next_siblings('b'), ['bar'])
        self.assertEqual(start.find_next_sibling(text="baz"), "baz")
        self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
    """Test previous_sibling navigation and the find_previous_sibling
    family."""
    def setUp(self):
        super(TestPreviousSibling, self).setUp()
        self.end = self.tree.find(id="4")
    def test_previous_sibling_of_root_is_none(self):
        self.assertEqual(self.tree.previous_sibling, None)
    def test_previous_sibling(self):
        self.assertEqual(self.end.previous_sibling['id'], '3')
        self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
        # Note the difference between previous_sibling and previous_element.
        self.assertEqual(self.end.previous_element['id'], '3.1')
    def test_previous_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.previous_sibling, None)
        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.previous_sibling, None)
        first_span = self.tree.find(id="1")
        self.assertEqual(first_span.previous_sibling, None)
    def test_find_previous_sibling(self):
        self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')
    def test_previous_siblings(self):
        self.assertSelectsIDs(self.end.find_previous_siblings("span"),
                              ['3', '2', '1'])
        self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])
    def test_previous_sibling_for_text_element(self):
        # Sibling navigation also works from a text node.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="baz")
        self.assertEqual(start.previous_sibling.name, 'b')
        self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
        self.assertSelects(start.find_previous_siblings('b'), ['bar'])
        self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
        self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
    """Test the ability to create new tags."""
    def test_new_tag(self):
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz")
        self.assertTrue(isinstance(new_tag, Tag))
        self.assertEqual("foo", new_tag.name)
        self.assertEqual(dict(bar="baz"), new_tag.attrs)
        # A freshly created tag is not attached to any tree.
        self.assertEqual(None, new_tag.parent)
    def test_tag_inherits_self_closing_rules_from_builder(self):
        if XML_BUILDER_PRESENT:
            xml_soup = BeautifulSoup("", "xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")
            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            self.assertEqual(b"<br/>", xml_br.encode())
            self.assertEqual(b"<p/>", xml_p.encode())
        html_soup = BeautifulSoup("", "html")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")
        # The HTML builder uses HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
        self.assertEqual(b"<br/>", html_br.encode())
        self.assertEqual(b"<p></p>", html_p.encode())
    def test_new_string_creates_navigablestring(self):
        soup = self.soup("")
        s = soup.new_string("foo")
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, NavigableString))
    def test_new_string_can_create_navigablestring_subclass(self):
        # new_string's optional second argument picks the string class.
        soup = self.soup("")
        s = soup.new_string("foo", Comment)
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, Comment))
class TestTreeModification(SoupTest):
    """Tests for APIs that modify the parse tree in place: attribute
    assignment, insert/append, insert_before/insert_after, replace_with,
    unwrap, wrap, extract, clear, and .string assignment.
    """
    def test_attribute_modification(self):
        soup = self.soup('<a id="1"></a>')
        soup.a['id'] = 2
        self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
        del(soup.a['id'])
        self.assertEqual(soup.decode(), self.document_for('<a></a>'))
        soup.a['id2'] = 'foo'
        self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))
    def test_new_tag_creation(self):
        builder = builder_registry.lookup('html')()
        soup = self.soup("<body></body>", builder=builder)
        a = Tag(soup, builder, 'a')
        ol = Tag(soup, builder, 'ol')
        a['href'] = 'http://foo.com/'
        soup.body.insert(0, a)
        soup.body.insert(1, ol)
        self.assertEqual(
            soup.body.encode(),
            b'<body><a href="http://foo.com/"></a><ol></ol></body>')
    def test_append_to_contents_moves_tag(self):
        doc = """<p id="1">Don't leave me <b>here</b>.</p>
        <p id="2">Don\'t leave!</p>"""
        soup = self.soup(doc)
        second_para = soup.find(id='2')
        bold = soup.b
        # Move the <b> tag to the end of the second paragraph.
        soup.find(id='2').append(soup.b)
        # The <b> tag is now a child of the second paragraph.
        self.assertEqual(bold.parent, second_para)
        self.assertEqual(
            soup.decode(), self.document_for(
                '<p id="1">Don\'t leave me .</p>\n'
                '<p id="2">Don\'t leave!<b>here</b></p>'))
    def test_replace_with_returns_thing_that_was_replaced(self):
        text = "<a></a><b><c></c></b>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.replace_with(soup.c)
        self.assertEqual(a, new_a)
    def test_unwrap_returns_thing_that_was_replaced(self):
        text = "<a><b></b><c></c></a>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.unwrap()
        self.assertEqual(a, new_a)
    def test_replace_tag_with_itself(self):
        # Replacing a tag with itself is a no-op.
        text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
        soup = self.soup(text)
        c = soup.c
        soup.c.replace_with(c)
        self.assertEqual(soup.decode(), self.document_for(text))
    def test_replace_tag_with_its_parent_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.b.replace_with, soup.a)
    def test_insert_tag_into_itself_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.a.insert, 0, soup.a)
    def test_replace_with_maintains_next_element_throughout(self):
        soup = self.soup('<p><a>one</a><b>three</b></p>')
        a = soup.a
        b = a.contents[0]
        # Make it so the <a> tag has two text children.
        a.insert(1, "two")
        # Now replace each one with the empty string.
        left, right = a.contents
        left.replaceWith('')
        right.replaceWith('')
        # The <b> tag is still connected to the tree.
        self.assertEqual("three", soup.b.string)
    def test_replace_final_node(self):
        soup = self.soup("<b>Argh!</b>")
        soup.find(text="Argh!").replace_with("Hooray!")
        new_text = soup.find(text="Hooray!")
        b = soup.b
        self.assertEqual(new_text.previous_element, b)
        self.assertEqual(new_text.parent, b)
        self.assertEqual(new_text.previous_element.next_element, new_text)
        self.assertEqual(new_text.next_element, None)
    def test_consecutive_text_nodes(self):
        # A builder should never create two consecutive text nodes,
        # but if you insert one next to another, Beautiful Soup will
        # handle it correctly.
        soup = self.soup("<a><b>Argh!</b><c></c></a>")
        soup.b.insert(1, "Hooray!")
        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Argh!Hooray!</b><c></c></a>"))
        new_text = soup.find(text="Hooray!")
        self.assertEqual(new_text.previous_element, "Argh!")
        self.assertEqual(new_text.previous_element.next_element, new_text)
        self.assertEqual(new_text.previous_sibling, "Argh!")
        self.assertEqual(new_text.previous_sibling.next_sibling, new_text)
        self.assertEqual(new_text.next_sibling, None)
        self.assertEqual(new_text.next_element, soup.c)
    def test_insert_string(self):
        soup = self.soup("<a></a>")
        soup.a.insert(0, "bar")
        soup.a.insert(0, "foo")
        # The strings were added to the tag.
        self.assertEqual(["foo", "bar"], soup.a.contents)
        # And they were converted to NavigableStrings.
        self.assertEqual(soup.a.contents[0].next_element, "bar")
    def test_insert_tag(self):
        builder = self.default_builder
        soup = self.soup(
            "<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
        magic_tag = Tag(soup, builder, 'magictag')
        magic_tag.insert(0, "the")
        soup.a.insert(1, magic_tag)
        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))
        # Make sure all the relationships are hooked up correctly.
        b_tag = soup.b
        self.assertEqual(b_tag.next_sibling, magic_tag)
        self.assertEqual(magic_tag.previous_sibling, b_tag)
        find = b_tag.find(text="Find")
        self.assertEqual(find.next_element, magic_tag)
        self.assertEqual(magic_tag.previous_element, find)
        c_tag = soup.c
        self.assertEqual(magic_tag.next_sibling, c_tag)
        self.assertEqual(c_tag.previous_sibling, magic_tag)
        the = magic_tag.find(text="the")
        self.assertEqual(the.parent, magic_tag)
        self.assertEqual(the.next_element, c_tag)
        self.assertEqual(c_tag.previous_element, the)
    def test_append_child_thats_already_at_the_end(self):
        # Appending a tag to its current position is a no-op.
        data = "<a><b></b></a>"
        soup = self.soup(data)
        soup.a.append(soup.b)
        self.assertEqual(data, soup.decode())
    def test_move_tag_to_beginning_of_parent(self):
        data = "<a><b></b><c></c><d></d></a>"
        soup = self.soup(data)
        soup.a.insert(0, soup.d)
        self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode())
    def test_insert_works_on_empty_element_tag(self):
        # This is a little strange, since most HTML parsers don't allow
        # markup like this to come through. But in general, we don't
        # know what the parser would or wouldn't have allowed, so
        # I'm letting this succeed for now.
        soup = self.soup("<br/>")
        soup.br.insert(1, "Contents")
        self.assertEqual(str(soup.br), "<br>Contents</br>")
    def test_insert_before(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_before("BAZ")
        soup.a.insert_before("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))
        # insert_before() on a tag already in the tree moves it.
        soup.a.insert_before(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
    def test_insert_after(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_after("BAZ")
        soup.a.insert_after("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))
        # insert_after() on a tag already in the tree moves it.
        soup.b.insert_after(soup.a)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
    def test_insert_after_raises_exception_if_after_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        self.assertRaises(ValueError, string.insert_after, tag)
        self.assertRaises(NotImplementedError, soup.insert_after, tag)
        self.assertRaises(ValueError, tag.insert_after, tag)
    def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        self.assertRaises(ValueError, string.insert_before, tag)
        self.assertRaises(NotImplementedError, soup.insert_before, tag)
        self.assertRaises(ValueError, tag.insert_before, tag)
    def test_replace_with(self):
        soup = self.soup(
            "<p>There's <b>no</b> business like <b>show</b> business</p>")
        no, show = soup.find_all('b')
        show.replace_with(no)
        self.assertEqual(
            soup.decode(),
            self.document_for(
                "<p>There's business like <b>no</b> business</p>"))
        self.assertEqual(show.parent, None)
        self.assertEqual(no.parent, soup.p)
        self.assertEqual(no.next_element, "no")
        self.assertEqual(no.next_sibling, " business")
    def test_replace_first_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.b.replace_with(soup.c)
        self.assertEqual("<a><c></c></a>", soup.decode())
    def test_replace_last_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.c.replace_with(soup.b)
        self.assertEqual("<a><b></b></a>", soup.decode())
    def test_nested_tag_replace_with(self):
        soup = self.soup(
            """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")
        # Replace the entire <b> tag and its contents ("reserve the
        # right") with the <f> tag ("refuse").
        remove_tag = soup.b
        move_tag = soup.f
        remove_tag.replace_with(move_tag)
        self.assertEqual(
            soup.decode(), self.document_for(
                "<a>We<f>refuse</f></a><e>to<g>service</g></e>"))
        # The <b> tag is now an orphan.
        self.assertEqual(remove_tag.parent, None)
        self.assertEqual(remove_tag.find(text="right").next_element, None)
        self.assertEqual(remove_tag.previous_element, None)
        self.assertEqual(remove_tag.next_sibling, None)
        self.assertEqual(remove_tag.previous_sibling, None)
        # The <f> tag is now connected to the <a> tag.
        self.assertEqual(move_tag.parent, soup.a)
        self.assertEqual(move_tag.previous_element, "We")
        self.assertEqual(move_tag.next_element.next_element, soup.e)
        self.assertEqual(move_tag.next_sibling, None)
        # The gap where the <f> tag used to be has been mended, and
        # the word "to" is now connected to the <g> tag.
        to_text = soup.find(text="to")
        g_tag = soup.g
        self.assertEqual(to_text.next_element, g_tag)
        self.assertEqual(to_text.next_sibling, g_tag)
        self.assertEqual(g_tag.previous_element, to_text)
        self.assertEqual(g_tag.previous_sibling, to_text)
    def test_unwrap(self):
        tree = self.soup("""
            <p>Unneeded <em>formatting</em> is unneeded</p>
            """)
        tree.em.unwrap()
        self.assertEqual(tree.em, None)
        self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")
    def test_wrap(self):
        soup = self.soup("I wish I was bold.")
        value = soup.string.wrap(soup.new_tag("b"))
        self.assertEqual(value.decode(), "<b>I wish I was bold.</b>")
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
    def test_wrap_extracts_tag_from_elsewhere(self):
        soup = self.soup("<b></b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
    def test_wrap_puts_new_contents_at_the_end(self):
        soup = self.soup("<b>I like being bold.</b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(2, len(soup.b.contents))
        self.assertEqual(
            soup.decode(), self.document_for(
                "<b>I like being bold.I wish I was bold.</b>"))
    def test_extract(self):
        soup = self.soup(
            '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')
        self.assertEqual(len(soup.body.contents), 3)
        extracted = soup.find(id="nav").extract()
        self.assertEqual(
            soup.decode(), "<html><body>Some content. More content.</body></html>")
        self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')
        # The extracted tag is now an orphan.
        self.assertEqual(len(soup.body.contents), 2)
        self.assertEqual(extracted.parent, None)
        self.assertEqual(extracted.previous_element, None)
        self.assertEqual(extracted.next_element.next_element, None)
        # The gap where the extracted tag used to be has been mended.
        content_1 = soup.find(text="Some content. ")
        content_2 = soup.find(text=" More content.")
        self.assertEqual(content_1.next_element, content_2)
        self.assertEqual(content_1.next_sibling, content_2)
        self.assertEqual(content_2.previous_element, content_1)
        self.assertEqual(content_2.previous_sibling, content_1)
    def test_extract_distinguishes_between_identical_strings(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        foo_1 = soup.a.string
        bar_1 = soup.b.string
        foo_2 = soup.new_string("foo")
        bar_2 = soup.new_string("bar")
        soup.a.append(foo_2)
        soup.b.append(bar_2)
        # Now there are two identical strings in the <a> tag, and two
        # in the <b> tag. Let's remove the first "foo" and the second
        # "bar".
        foo_1.extract()
        bar_2.extract()
        self.assertEqual(foo_2, soup.a.string)
        self.assertEqual(bar_2, soup.b.string)
    def test_clear(self):
        """Tag.clear()"""
        soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
        # clear using extract()
        a = soup.a
        soup.p.clear()
        self.assertEqual(len(soup.p.contents), 0)
        self.assertTrue(hasattr(a, "contents"))
        # clear using decompose()
        em = a.em
        a.clear(decompose=True)
        self.assertEqual(0, len(em.contents))
    def test_string_set(self):
        """Tag.string = 'string'"""
        soup = self.soup("<a></a> <b><c></c></b>")
        soup.a.string = "foo"
        self.assertEqual(soup.a.contents, ["foo"])
        soup.b.string = "bar"
        self.assertEqual(soup.b.contents, ["bar"])
    def test_string_set_does_not_affect_original_string(self):
        soup = self.soup("<a><b>foo</b><c>bar</c>")
        soup.b.string = soup.c.string
        self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>")
    def test_set_string_preserves_class_of_string(self):
        soup = self.soup("<a></a>")
        cdata = CData("foo")
        soup.a.string = cdata
        self.assertTrue(isinstance(soup.a.string, CData))
class TestElementObjects(SoupTest):
    """Test various features of element objects."""
    def test_len(self):
        """The length of an element is its number of children."""
        soup = self.soup("<top>1<b>2</b>3</top>")
        # The BeautifulSoup object itself contains one element: the
        # <top> tag.
        self.assertEqual(len(soup.contents), 1)
        self.assertEqual(len(soup), 1)
        # The <top> tag contains three elements: the text node "1", the
        # <b> tag, and the text node "3".
        self.assertEqual(len(soup.top), 3)
        self.assertEqual(len(soup.top.contents), 3)
    def test_member_access_invokes_find(self):
        """Accessing a Python member .foo invokes find('foo')"""
        soup = self.soup('<b><i></i></b>')
        self.assertEqual(soup.b, soup.find('b'))
        self.assertEqual(soup.b.i, soup.find('b').find('i'))
        # A tag that isn't in the document yields None, just like find().
        self.assertEqual(soup.a, None)
    def test_deprecated_member_access(self):
        soup = self.soup('<b><i></i></b>')
        with warnings.catch_warnings(record=True) as w:
            tag = soup.bTag
        self.assertEqual(soup.b, tag)
        self.assertEqual(
            '.bTag is deprecated, use .find("b") instead.',
            str(w[0].message))
    def test_has_attr(self):
        """has_attr() checks for the presence of an attribute.
        Please note: has_attr() is different from
        __in__. has_attr() checks the tag's attributes and __in__
        checks the tag's children.
        """
        soup = self.soup("<foo attr='bar'>")
        self.assertTrue(soup.foo.has_attr('attr'))
        self.assertFalse(soup.foo.has_attr('attr2'))
    def test_attributes_come_out_in_alphabetical_order(self):
        markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
        self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
    def test_string(self):
        # A tag that contains only a text node makes that node
        # available as .string.
        soup = self.soup("<b>foo</b>")
        self.assertEqual(soup.b.string, 'foo')
    def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
        soup = self.soup("<b></b>")
        self.assertEqual(soup.b.string, None)
    def test_tag_with_multiple_children_has_no_string(self):
        # A tag with multiple children has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        self.assertEqual(soup.b.string, None)
        soup = self.soup("<a>foo<b></b>bar</b>")
        self.assertEqual(soup.b.string, None)
        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        self.assertEqual(soup.a.string, None)
    def test_tag_with_recursive_string_has_string(self):
        # A tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        self.assertEqual(soup.a.string, "foo")
        self.assertEqual(soup.string, "foo")
    def test_lack_of_string(self):
        """Only a tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        self.assertFalse(soup.b.string)
        soup = self.soup("<b></b>")
        self.assertFalse(soup.b.string)
    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
        self.assertEqual(soup.a.text, "ar t ")
        self.assertEqual(soup.a.get_text(strip=True), "art")
        self.assertEqual(soup.a.get_text(","), "a,r, , t ")
        self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")
    def test_get_text_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(soup.get_text(), "foobar")
        # Passing types= controls which NavigableString subclasses count.
        self.assertEqual(
            soup.get_text(types=(NavigableString, Comment)), "fooIGNOREbar")
        self.assertEqual(
            soup.get_text(types=None), "fooIGNOREbar")
    def test_all_strings_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(['foo', 'bar'], list(soup.strings))
class TestCDAtaListAttributes(SoupTest):
    """Testing cdata-list attributes like 'class'.
    """
    def test_single_value_becomes_list(self):
        soup = self.soup("<a class='foo'>")
        self.assertEqual(["foo"],soup.a['class'])
    def test_multiple_values_becomes_list(self):
        soup = self.soup("<a class='foo bar'>")
        self.assertEqual(["foo", "bar"], soup.a['class'])
    def test_multiple_values_separated_by_weird_whitespace(self):
        # Tabs and newlines also act as separators.
        soup = self.soup("<a class='foo\tbar\nbaz'>")
        self.assertEqual(["foo", "bar", "baz"],soup.a['class'])
    def test_attributes_joined_into_string_on_output(self):
        soup = self.soup("<a class='foo\tbar'>")
        self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())
    def test_accept_charset(self):
        soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
        self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])
    def test_cdata_attribute_applying_only_to_one_tag(self):
        data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
        soup = self.soup(data)
        # We saw in another test that accept-charset is a cdata-list
        # attribute for the <form> tag. But it's not a cdata-list
        # attribute for any other tag.
        self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])
    def test_string_has_immutable_name_property(self):
        # A NavigableString's .name is always None and cannot be set.
        string = self.soup("s").string
        self.assertEqual(None, string.name)
        def t():
            string.name = 'foo'
        self.assertRaises(AttributeError, t)
class TestPersistence(SoupTest):
    "Testing features like pickle and deepcopy."
    def setUp(self):
        super(TestPersistence, self).setUp()
        # A realistic document to round-trip through pickle/deepcopy.
        self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
        self.tree = self.soup(self.page)
    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        dumped = pickle.dumps(self.tree, 2)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.__class__, BeautifulSoup)
        self.assertEqual(loaded.decode(), self.tree.decode())
    def test_deepcopy_identity(self):
        # Making a deepcopy of a tree yields an identical tree.
        copied = copy.deepcopy(self.tree)
        self.assertEqual(copied.decode(), self.tree.decode())
    def test_unicode_pickle(self):
        # A tree containing Unicode characters can be pickled.
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.decode(), soup.decode())
class TestSubstitutions(SoupTest):
    """Tests of the formatter argument to decode()/encode()/prettify(),
    and of encoding substitution in <meta> tags.
    """
    def test_default_formatter_is_minimal(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
    def test_formatter_html(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="html")
        # The "html" formatter also converts the e-with-acute to a
        # named HTML entity.
        self.assertEqual(
            decoded,
            self.document_for("<b><<Sacré bleu!>></b>"))
    def test_formatter_minimal(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
    def test_formatter_null(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter=None)
        # Neither the angle brackets nor the e-with-acute are converted.
        # This is not valid HTML, but it's what the user wanted.
        self.assertEqual(decoded,
                         self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
    def test_formatter_custom(self):
        markup = u"<b><foo></b><b>bar</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter = lambda x: x.upper())
        # Instead of normal entity conversion code, the custom
        # callable is called on every string.
        self.assertEqual(
            decoded,
            self.document_for(u"<b><FOO></b><b>BAR</b>"))
    def test_formatter_is_run_on_attribute_values(self):
        markup = u'<a href="http://a.com?a=b&c=é">e</a>'
        soup = self.soup(markup)
        a = soup.a
        expect_minimal = u'<a href="http://a.com?a=b&c=é">e</a>'
        self.assertEqual(expect_minimal, a.decode())
        self.assertEqual(expect_minimal, a.decode(formatter="minimal"))
        expect_html = u'<a href="http://a.com?a=b&c=é">e</a>'
        self.assertEqual(expect_html, a.decode(formatter="html"))
        self.assertEqual(markup, a.decode(formatter=None))
        expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>'
        self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper()))
    def test_formatter_skips_script_tag_for_html_documents(self):
        doc = """
  <script type="text/javascript">
   console.log("< < hey > > ");
  </script>
"""
        encoded = BeautifulSoup(doc).encode()
        self.assertTrue(b"< < hey > >" in encoded)
    def test_formatter_skips_style_tag_for_html_documents(self):
        doc = """
  <style type="text/css">
   console.log("< < hey > > ");
  </style>
"""
        encoded = BeautifulSoup(doc).encode()
        self.assertTrue(b"< < hey > >" in encoded)
    def test_prettify_leaves_preformatted_text_alone(self):
        soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz ")
        # Everything outside the <pre> tag is reformatted, but everything
        # inside is left alone.
        self.assertEqual(
            u'<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n</div>',
            soup.div.prettify())
    def test_prettify_accepts_formatter(self):
        soup = BeautifulSoup("<html><body>foo</body></html>")
        pretty = soup.prettify(formatter = lambda x: x.upper())
        self.assertTrue("FOO" in pretty)
    def test_prettify_outputs_unicode_by_default(self):
        soup = self.soup("<a></a>")
        self.assertEqual(unicode, type(soup.prettify()))
    def test_prettify_can_encode_data(self):
        soup = self.soup("<a></a>")
        self.assertEqual(bytes, type(soup.prettify("utf-8")))
    def test_html_entity_substitution_off_by_default(self):
        markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
        soup = self.soup(markup)
        encoded = soup.b.encode("utf-8")
        self.assertEqual(encoded, markup.encode('utf-8'))
    def test_encoding_substitution(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/>')
        soup = self.soup(meta_tag)
        # Parse the document, and the charset appears unchanged.
        self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis')
        # Encode the document into some encoding, and the encoding is
        # substituted into the meta tag.
        utf_8 = soup.encode("utf-8")
        self.assertTrue(b"charset=utf-8" in utf_8)
        euc_jp = soup.encode("euc_jp")
        self.assertTrue(b"charset=euc_jp" in euc_jp)
        shift_jis = soup.encode("shift-jis")
        self.assertTrue(b"charset=shift-jis" in shift_jis)
        utf_16_u = soup.encode("utf-16").decode("utf-16")
        self.assertTrue("charset=utf-16" in utf_16_u)
    def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
        markup = ('<head><meta content="text/html; charset=x-sjis" '
                  'http-equiv="Content-type"/></head><pre>foo</pre>')
        # Beautiful Soup used to try to rewrite the meta tag even if the
        # meta tag got filtered out by the strainer. This test makes
        # sure that doesn't happen.
        strainer = SoupStrainer('pre')
        soup = self.soup(markup, parse_only=strainer)
        self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
    """Test the ability to encode objects into strings."""
    def test_unicode_string_can_be_encoded(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(soup.b.string.encode("utf-8"),
                         u"\N{SNOWMAN}".encode("utf-8"))
    def test_tag_containing_unicode_string_can_be_encoded(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            soup.b.encode("utf-8"), html.encode("utf-8"))
    def test_encoding_substitutes_unrecognized_characters_by_default(self):
        # Characters the target codec can't represent become entities.
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(soup.b.encode("ascii"), b"<b>☃</b>")
    def test_encoding_can_be_made_strict(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertRaises(
            UnicodeEncodeError, soup.encode, "ascii", errors="strict")
    def test_decode_contents(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents())
    def test_encode_contents(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents(
                encoding="utf8"))
    def test_deprecated_renderContents(self):
        # renderContents() is the deprecated BS3 name for encode_contents().
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents())
class TestNavigableStringSubclasses(SoupTest):
    """Tests of manually-created NavigableString subclasses
    (CData, Doctype) and how they serialize.
    """
    def test_cdata(self):
        # None of the current builders turn CDATA sections into CData
        # objects, but you can create them manually.
        soup = self.soup("")
        cdata = CData("foo")
        soup.insert(1, cdata)
        self.assertEqual(str(soup), "<![CDATA[foo]]>")
        self.assertEqual(soup.find(text="foo"), "foo")
        self.assertEqual(soup.contents[0], "foo")
    def test_cdata_is_never_formatted(self):
        """Text inside a CData object is passed into the formatter.
        But the return value is ignored.
        """
        self.count = 0
        def increment(*args):
            self.count += 1
            return "BITTER FAILURE"
        soup = self.soup("")
        cdata = CData("<><><>")
        soup.insert(1, cdata)
        self.assertEqual(
            b"<![CDATA[<><><>]]>", soup.encode(formatter=increment))
        # The formatter was called exactly once, on the CData's text.
        self.assertEqual(1, self.count)
    def test_doctype_ends_in_newline(self):
        # Unlike other NavigableString subclasses, a DOCTYPE always ends
        # in a newline.
        doctype = Doctype("foo")
        soup = self.soup("")
        soup.insert(1, doctype)
        self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")
class TestSoupSelector(TreeTest):
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<div id="main" class="fancy">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
</span>
</div>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""
def setUp(self):
self.soup = BeautifulSoup(self.HTML)
def assertSelects(self, selector, expected_ids):
el_ids = [el['id'] for el in self.soup.select(selector)]
el_ids.sort()
expected_ids.sort()
self.assertEqual(expected_ids, el_ids,
"Selector %s, expected [%s], got [%s]" % (
selector, ', '.join(expected_ids), ', '.join(el_ids)
)
)
assertSelect = assertSelects
def assertSelectMultiple(self, *tests):
for selector, expected_ids in tests:
self.assertSelect(selector, expected_ids)
def test_one_tag_one(self):
els = self.soup.select('title')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'title')
self.assertEqual(els[0].contents, [u'The title'])
def test_one_tag_many(self):
els = self.soup.select('div')
self.assertEqual(len(els), 3)
for div in els:
self.assertEqual(div.name, 'div')
def test_tag_in_tag_one(self):
els = self.soup.select('div div')
self.assertSelects('div div', ['inner'])
def test_tag_in_tag_many(self):
for selector in ('html div', 'html body div', 'body div'):
self.assertSelects(selector, ['main', 'inner', 'footer'])
def test_tag_no_match(self):
self.assertEqual(len(self.soup.select('del')), 0)
def test_invalid_tag(self):
self.assertRaises(ValueError, self.soup.select, 'tag%t')
def test_header_tags(self):
self.assertSelectMultiple(
('h1', ['header1']),
('h2', ['header2', 'header3']),
)
def test_class_one(self):
for selector in ('.onep', 'p.onep', 'html p.onep'):
els = self.soup.select(selector)
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'p')
self.assertEqual(els[0]['class'], ['onep'])
def test_class_mismatched_tag(self):
els = self.soup.select('div.onep')
self.assertEqual(len(els), 0)
def test_one_id(self):
for selector in ('div#inner', '#inner', 'div div#inner'):
self.assertSelects(selector, ['inner'])
def test_bad_id(self):
els = self.soup.select('#doesnotexist')
self.assertEqual(len(els), 0)
def test_items_in_id(self):
els = self.soup.select('div#inner p')
self.assertEqual(len(els), 3)
for el in els:
self.assertEqual(el.name, 'p')
self.assertEqual(els[1]['class'], ['onep'])
self.assertFalse(els[0].has_attr('class'))
def test_a_bunch_of_emptys(self):
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
self.assertEqual(len(self.soup.select(selector)), 0)
def test_multi_class_support(self):
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
self.assertSelects(selector, ['pmulti'])
def test_multi_class_selection(self):
for selector in ('.class1.class3', '.class3.class2',
'.class1.class2.class3'):
self.assertSelects(selector, ['pmulti'])
def test_child_selector(self):
self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
self.assertSelects('.s1 > a span', ['s1a2s1'])
def test_child_selector_id(self):
self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1'])
def test_attribute_equals(self):
self.assertSelectMultiple(
('p[class="onep"]', ['p1']),
('p[id="p1"]', ['p1']),
('[class="onep"]', ['p1']),
('[id="p1"]', ['p1']),
('link[rel="stylesheet"]', ['l1']),
('link[type="text/css"]', ['l1']),
('link[href="blah.css"]', ['l1']),
('link[href="no-blah.css"]', []),
('[rel="stylesheet"]', ['l1']),
('[type="text/css"]', ['l1']),
('[href="blah.css"]', ['l1']),
('[href="no-blah.css"]', []),
('p[href="no-blah.css"]', []),
('[href="no-blah.css"]', []),
)
def test_attribute_tilde(self):
self.assertSelectMultiple(
('p[class~="class1"]', ['pmulti']),
('p[class~="class2"]', ['pmulti']),
('p[class~="class3"]', ['pmulti']),
('[class~="class1"]', ['pmulti']),
('[class~="class2"]', ['pmulti']),
('[class~="class3"]', ['pmulti']),
('a[rel~="friend"]', ['bob']),
('a[rel~="met"]', ['bob']),
('[rel~="friend"]', ['bob']),
('[rel~="met"]', ['bob']),
)
def test_attribute_startswith(self):
self.assertSelectMultiple(
('[rel^="style"]', ['l1']),
('link[rel^="style"]', ['l1']),
('notlink[rel^="notstyle"]', []),
('[rel^="notstyle"]', []),
('link[rel^="notstyle"]', []),
('link[href^="bla"]', ['l1']),
('a[href^="http://"]', ['bob', 'me']),
('[href^="http://"]', ['bob', 'me']),
('[id^="p"]', ['pmulti', 'p1']),
('[id^="m"]', ['me', 'main']),
('div[id^="m"]', ['main']),
('a[id^="m"]', ['me']),
)
def test_attribute_endswith(self):
self.assertSelectMultiple(
('[href$=".css"]', ['l1']),
('link[href$=".css"]', ['l1']),
('link[id$="1"]', ['l1']),
('[id$="1"]', ['l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1']),
('div[id$="1"]', []),
('[id$="noending"]', []),
)
def test_attribute_contains(self):
self.assertSelectMultiple(
# From test_attribute_startswith
('[rel*="style"]', ['l1']),
('link[rel*="style"]', ['l1']),
('notlink[rel*="notstyle"]', []),
('[rel*="notstyle"]', []),
('link[rel*="notstyle"]', []),
('link[href*="bla"]', ['l1']),
('a[href*="http://"]', ['bob', 'me']),
('[href*="http://"]', ['bob', 'me']),
('[id*="p"]', ['pmulti', 'p1']),
('div[id*="m"]', ['main']),
('a[id*="m"]', ['me']),
# From test_attribute_endswith
('[href*=".css"]', ['l1']),
('link[href*=".css"]', ['l1']),
('link[id*="1"]', ['l1']),
('[id*="1"]', ['l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1']),
('div[id*="1"]', []),
('[id*="noending"]', []),
# New for this test
('[href*="."]', ['bob', 'me', 'l1']),
('a[href*="."]', ['bob', 'me']),
('link[href*="."]', ['l1']),
('div[id*="n"]', ['main', 'inner']),
('div[id*="nn"]', ['inner']),
)
def test_attribute_exact_or_hypen(self):
self.assertSelectMultiple(
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('p[lang|="fr"]', ['lang-fr']),
('p[lang|="gb"]', []),
)
def test_attribute_exists(self):
self.assertSelectMultiple(
('[rel]', ['l1', 'bob', 'me']),
('link[rel]', ['l1']),
('a[rel]', ['bob', 'me']),
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
('p[class]', ['p1', 'pmulti']),
('[blah]', []),
('p[blah]', []),
)
def test_nth_of_type(self):
# Try to select first paragraph
els = self.soup.select('div#inner p:nth-of-type(1)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, u'Some text')
# Try to select third paragraph
els = self.soup.select('div#inner p:nth-of-type(3)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, u'Another')
# Try to select (non-existent!) fourth paragraph
els = self.soup.select('div#inner p:nth-of-type(4)')
self.assertEqual(len(els), 0)
# Pass in an invalid value.
self.assertRaises(
ValueError, self.soup.select, 'div p:nth-of-type(0)')
def test_nth_of_type_direct_descendant(self):
els = self.soup.select('div#inner > p:nth-of-type(1)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, u'Some text')
def test_id_child_selector_nth_of_type(self):
self.assertSelects('#inner > p:nth-of-type(2)', ['p1'])
def test_select_on_element(self):
# Other tests operate on the tree; this operates on an element
# within the tree.
inner = self.soup.find("div", id="main")
selected = inner.select("div")
# The <div id="inner"> tag was selected. The <div id="footer">
# tag was not.
self.assertSelectsIDs(selected, ['inner'])
def test_overspecified_child_id(self):
self.assertSelects(".fancy #inner", ['inner'])
self.assertSelects(".normal #inner", [])
def test_adjacent_sibling_selector(self):
self.assertSelects('#p1 + h2', ['header2'])
self.assertSelects('#p1 + h2 + p', ['pmulti'])
self.assertSelects('#p1 + #header2 + .class1', ['pmulti'])
self.assertEqual([], self.soup.select('#p1 + p'))
def test_general_sibling_selector(self):
self.assertSelects('#p1 ~ h2', ['header2', 'header3'])
self.assertSelects('#p1 ~ #header2', ['header2'])
self.assertSelects('#p1 ~ h2 + a', ['me'])
self.assertSelects('#p1 ~ h2 + [rel="me"]', ['me'])
self.assertEqual([], self.soup.select('#inner ~ h2'))
def test_dangling_combinator(self):
self.assertRaises(ValueError, self.soup.select, 'h1 >')
def test_sibling_combinator_wont_select_same_tag_twice(self):
self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])
| gpl-2.0 |
JingJunYin/tensorflow | tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py | 13 | 52856 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow training Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def _gen_learner_config(num_classes,
                        l1_reg,
                        l2_reg,
                        tree_complexity,
                        max_depth,
                        min_node_weight,
                        pruning_mode,
                        growing_mode,
                        dropout_probability=None,
                        dropout_learning_rate=None,
                        dropout_prob_of_skipping=None):
  """Build and serialize a LearnerConfig proto with the desired settings."""
  cfg = learner_pb2.LearnerConfig()
  cfg.num_classes = num_classes
  # Regularization settings.
  cfg.regularization.l1 = l1_reg
  cfg.regularization.l2 = l2_reg
  cfg.regularization.tree_complexity = tree_complexity
  # Growth constraints.
  cfg.constraints.max_tree_depth = max_depth
  cfg.constraints.min_node_weight = min_node_weight
  cfg.pruning_mode = pruning_mode
  cfg.growing_mode = growing_mode
  # Dropout fields are only written when explicitly requested, so the
  # submessage stays absent from the serialized proto otherwise.
  if dropout_probability is not None:
    cfg.learning_rate_tuner.dropout.dropout_probability = dropout_probability
  if dropout_learning_rate is not None:
    cfg.learning_rate_tuner.dropout.learning_rate = dropout_learning_rate
  if dropout_prob_of_skipping is not None:
    cfg.learning_rate_tuner.dropout.dropout_prob_of_skipping = (
        dropout_prob_of_skipping)
  return cfg.SerializeToString()
def _gen_dense_split_info(fc, threshold, left_weight, right_weight):
  """Serialize a SplitInfo proto describing a dense-float binary split."""
  template = """
    split_node {
      dense_float_binary_split {
        feature_column: %d
        threshold: %f
      }
    }
    left_child {
      sparse_vector {
        index: 0
        value: %f
      }
    }
    right_child {
      sparse_vector {
        index: 0
        value: %f
      }
    }"""
  split_info = split_info_pb2.SplitInfo()
  text_format.Merge(template % (fc, threshold, left_weight, right_weight),
                    split_info)
  return split_info.SerializeToString()
def _gen_categorical_split_info(fc, feat_id, left_weight, right_weight):
  """Serialize a SplitInfo proto describing a categorical-id binary split."""
  template = """
    split_node {
      categorical_id_binary_split {
        feature_column: %d
        feature_id: %d
      }
    }
    left_child {
      sparse_vector {
        index: 0
        value: %f
      }
    }
    right_child {
      sparse_vector {
        index: 0
        value: %f
      }
    }"""
  split_info = split_info_pb2.SplitInfo()
  text_format.Merge(template % (fc, feat_id, left_weight, right_weight),
                    split_info)
  return split_info.SerializeToString()
def _get_bias_update(grads, hess):
  """Newton-style delta: -grads/hess where hess > 0, zero elsewhere."""
  fallback = array_ops.zeros_like(grads)
  return array_ops.where(hess > 0, -grads / hess, fallback)
class CenterTreeEnsembleBiasOpTest(test_util.TensorFlowTestCase):
  """Tests for centering tree ensemble bias.

  Exercises training_ops.center_tree_ensemble_bias across several iterations:
  the op applies the given delta updates to the bias (first) tree, bumps the
  ensemble stamp, and returns whether centering should continue.
  """
  def testCenterBias(self):
    """Tests bias centering for multiple iterations.

    Runs three centering steps against an initially empty ensemble:
    two steps with meaningful updates (centering continues) and one with
    negligible updates (centering stops and the bias tree is counted as a
    full tree in the ensemble stats).
    """
    with self.test_session() as session:
      # Create empty ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = _gen_learner_config(
          num_classes=3,
          l1_reg=0,
          l2_reg=0,
          tree_complexity=0,
          max_depth=4,
          min_node_weight=0,
          pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
          growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
          # Dropout does not change anything here.
          dropout_probability=0.5)
      # Center bias for the initial step.
      # Deltas come from _get_bias_update: -grad/hess per dimension.
      grads = constant_op.constant([0.4, -0.3])
      hess = constant_op.constant([2.0, 1.0])
      continue_centering1 = training_ops.center_tree_ensemble_bias(
          tree_ensemble_handle,
          stamp_token=0,
          next_stamp_token=1,
          delta_updates=_get_bias_update(grads, hess),
          learner_config=learner_config)
      continue_centering = session.run(continue_centering1)
      self.assertEqual(continue_centering, True)
      # Validate ensemble state.
      # dim 0 update: -0.4/2.0 = -0.2
      # dim 1 update: +0.3/1.0 = +0.3
      new_stamp, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
      tree_ensemble_config.ParseFromString(serialized)
      # The bias lives in a single-leaf tree; num_trees stays 0 while
      # centering is still in progress.
      expected_result = """
          trees {
            nodes {
              leaf {
                vector {
                  value: -0.2
                  value: 0.3
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_layers_grown: 1
          }
          growing_metadata {
            num_trees_attempted: 1
            num_layers_attempted: 1
          }
      """
      self.assertEqual(new_stamp, 1)
      self.assertEqual(stats.num_trees, 0)
      self.assertEqual(stats.num_layers, 1)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 1)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 1)
      self.assertProtoEquals(expected_result, tree_ensemble_config)
      # Center bias for another step.
      # dim 0 update: -0.06/0.5 = -0.12
      # dim 1 update: -0.01/0.5 = -0.02
      grads = constant_op.constant([0.06, 0.01])
      hess = constant_op.constant([0.5, 0.5])
      continue_centering2 = training_ops.center_tree_ensemble_bias(
          tree_ensemble_handle,
          stamp_token=1,
          next_stamp_token=2,
          delta_updates=_get_bias_update(grads, hess),
          learner_config=learner_config)
      continue_centering = session.run(continue_centering2)
      self.assertEqual(continue_centering, True)
      # Validate ensemble state.
      new_stamp, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=2))
      tree_ensemble_config.ParseFromString(serialized)
      # Leaf values accumulate: -0.2-0.12 = -0.32 and 0.3-0.02 = 0.28.
      expected_result = """
          trees {
            nodes {
              leaf {
                vector {
                  value: -0.32
                  value: 0.28
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_layers_grown: 1
          }
          growing_metadata {
            num_trees_attempted: 1
            num_layers_attempted: 1
          }
      """
      self.assertEqual(new_stamp, 2)
      self.assertEqual(stats.num_trees, 0)
      self.assertEqual(stats.num_layers, 1)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 1)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 1)
      self.assertProtoEquals(expected_result, tree_ensemble_config)
      # Center bias for another step, but this time updates are negligible.
      grads = constant_op.constant([0.0000001, -0.00003])
      hess = constant_op.constant([0.5, 0.0])
      continue_centering3 = training_ops.center_tree_ensemble_bias(
          tree_ensemble_handle,
          stamp_token=2,
          next_stamp_token=3,
          delta_updates=_get_bias_update(grads, hess),
          learner_config=learner_config)
      continue_centering = session.run(continue_centering3)
      # The op signals that centering is done.
      self.assertEqual(continue_centering, False)
      # Validate ensemble stamp.
      new_stamp, _ = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=3))
      self.assertEqual(new_stamp, 3)
      # Once centering stops, the bias tree counts as a finished tree.
      self.assertEqual(stats.num_trees, 1)
      self.assertEqual(stats.num_layers, 1)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 1)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 1)
class GrowTreeEnsembleOpTest(test_util.TensorFlowTestCase):
"""Tests for growing tree ensemble from split candidates."""
def testGrowEmptyEnsemble(self):
"""Test growing an empty ensemble."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
# Dropout does not change anything here, tree is not finalized.
dropout_probability=0.5)
# Prepare handler inputs.
# Note that handlers 1 & 3 have the same gain but different splits.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(0, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(0, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config,
dropout_seed=123,
center_bias=True)
session.run(grow_op)
# Expect the simpler split from handler 1 to be chosen.
# The grown tree should be finalized as max tree depth is 1.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.52
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 1)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowExistingEnsembleTreeNotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.test_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
categorical_id_binary_split {
feature_id: 4
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.61999988556
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.14300012589
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
}
tree_weights: 0.10000000149
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=3,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
# Dropout does not change anything here - tree is not finalized.
dropout_probability=0.5)
# Prepare handler inputs.
# Handler 1 only has a candidate for partition 1, handler 2 has candidates
# for both partitions and handler 3 only has a candidate for partition 2.
handler1_partitions = np.array([1], dtype=np.int32)
handler1_gains = np.array([1.4], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.21, -6.0, 1.65)]
handler2_partitions = np.array([1, 2], dtype=np.int32)
handler2_gains = np.array([0.63, 2.7], dtype=np.float32)
handler2_split = [
_gen_dense_split_info(0, 0.23, -0.6, 0.24),
_gen_categorical_split_info(1, 7, -1.5, 2.3)
]
handler3_partitions = np.array([2], dtype=np.int32)
handler3_gains = np.array([1.7], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(0, 3, -0.75, 1.93)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config,
dropout_seed=123,
center_bias=True)
session.run(grow_op)
# Expect the split for partition 1 to be chosen from handler 1 and
# the split for partition 2 to be chosen from handler 2.
# The grown tree should not be finalized as max tree depth is 3 and
# it's only grown 2 layers.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
categorical_id_binary_split {
feature_id: 4
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.61999988556
}
}
nodes {
dense_float_binary_split {
threshold: 0.21
left_id: 3
right_id: 4
}
node_metadata {
gain: 1.4
}
}
nodes {
categorical_id_binary_split {
feature_column: 1
feature_id: 7
left_id: 5
right_id: 6
}
node_metadata {
gain: 2.7
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -6.0
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 1.65
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -1.5
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 2)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 2)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowExistingEnsembleTreeFinalized(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.test_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
categorical_id_binary_split {
feature_column: 3
feature_id: 7
left_id: 1
right_id: 2
}
node_metadata {
gain: 1.3
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -0.9
}
}
}
}
tree_weights: 0.10000000149
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(5, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(2, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(8, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.2,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config,
dropout_seed=123,
center_bias=True)
session.run(grow_op)
# Expect a new tree to be added with the split from handler 1.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
categorical_id_binary_split {
feature_column: 3
feature_id: 7
left_id: 1
right_id: 2
}
node_metadata {
gain: 1.3
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 2.3
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -0.9
}
}
}
}
trees {
nodes {
dense_float_binary_split {
feature_column: 5
threshold: 0.52
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.143
}
}
}
}
tree_weights: 0.1
tree_weights: 0.2
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 2)
self.assertEqual(stats.num_layers, 2)
self.assertEqual(stats.active_tree, 2)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 2)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsemblePrePrune(self):
"""Test growing an ensemble with pre-pruning."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# All handlers have negative gain.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([-0.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.52, 0.01, 0.0143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([-1.3], dtype=np.float32)
handler2_split = [_gen_categorical_split_info(0, 7, 0.013, 0.0143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[handler1_partitions, handler2_partitions],
gains=[handler1_gains, handler2_gains],
splits=[handler1_split, handler2_split],
learner_config=learner_config,
dropout_seed=123,
center_bias=True)
session.run(grow_op)
# Expect the ensemble to be empty.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 0)
self.assertEqual(stats.active_tree, 0)
self.assertEqual(stats.active_layer, 0)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals("""
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
def testGrowEnsemblePostPruneNone(self):
"""Test growing an empty ensemble."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=1,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.POST_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# Note that handlers 1 & 3 have the same gain but different splits.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([7.62], dtype=np.float32)
handler1_split = [_gen_dense_split_info(0, 0.52, -4.375, 7.143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([0.63], dtype=np.float32)
handler2_split = [_gen_dense_split_info(0, 0.23, -0.6, 0.24)]
handler3_partitions = np.array([0], dtype=np.int32)
handler3_gains = np.array([7.62], dtype=np.float32)
handler3_split = [_gen_categorical_split_info(0, 7, -4.375, 7.143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[
handler1_partitions, handler2_partitions, handler3_partitions
],
gains=[handler1_gains, handler2_gains, handler3_gains],
splits=[handler1_split, handler2_split, handler3_split],
learner_config=learner_config,
dropout_seed=123,
center_bias=True)
session.run(grow_op)
# Expect the simpler split from handler 1 to be chosen.
# The grown tree should be finalized as max tree depth is 1.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
tree_ensemble_config.ParseFromString(serialized)
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.52
left_id: 1
right_id: 2
}
node_metadata {
gain: 7.62
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: -4.375
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 7.143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 1)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEnsemblePostPruneAll(self):
"""Test growing an ensemble with post-pruning."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = _gen_learner_config(
num_classes=2,
l1_reg=0,
l2_reg=0,
tree_complexity=0,
max_depth=2,
min_node_weight=0,
pruning_mode=learner_pb2.LearnerConfig.POST_PRUNE,
growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
# Prepare handler inputs.
# All handlers have negative gain.
handler1_partitions = np.array([0], dtype=np.int32)
handler1_gains = np.array([-1.3], dtype=np.float32)
handler1_split = [_gen_categorical_split_info(0, 7, 0.013, 0.0143)]
handler2_partitions = np.array([0], dtype=np.int32)
handler2_gains = np.array([-0.62], dtype=np.float32)
handler2_split = [_gen_dense_split_info(0, 0.33, 0.01, 0.0143)]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=0,
next_stamp_token=1,
learning_rate=0.1,
partition_ids=[handler1_partitions, handler2_partitions],
gains=[handler1_gains, handler2_gains],
splits=[handler1_split, handler2_split],
learner_config=learner_config,
dropout_seed=123,
center_bias=True)
session.run(grow_op)
# Expect the split from handler 2 to be chosen despite the negative gain.
# The grown tree should not be finalized as max tree depth is 2 so no
# pruning occurs.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
tree_ensemble_config.ParseFromString(serialized)
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
expected_result = """
trees {
nodes {
dense_float_binary_split {
threshold: 0.33
left_id: 1
right_id: 2
}
node_metadata {
gain: -0.62
original_leaf {
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.01
}
}
}
nodes {
leaf {
sparse_vector {
index: 0
value: 0.0143
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 1)
self.assertEqual(stats.active_tree, 1)
self.assertEqual(stats.active_layer, 1)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 1)
self.assertProtoEquals(expected_result, tree_ensemble_config)
# Prepare handler inputs.
# All handlers have negative gain.
handler1_partitions = np.array([1, 2], dtype=np.int32)
handler1_gains = np.array([-0.2, -0.5], dtype=np.float32)
handler1_split = [
_gen_categorical_split_info(3, 7, 0.07, 0.083),
_gen_categorical_split_info(3, 5, 0.041, 0.064)
]
# Grow tree ensemble.
grow_op = training_ops.grow_tree_ensemble(
tree_ensemble_handle,
stamp_token=1,
next_stamp_token=2,
learning_rate=0.1,
partition_ids=[handler1_partitions],
gains=[handler1_gains],
splits=[handler1_split],
learner_config=learner_config,
dropout_seed=123,
center_bias=True)
session.run(grow_op)
# Expect the ensemble to be empty as post-pruning will prune
# the entire finalized tree.
new_stamp, serialized = session.run(
model_ops.tree_ensemble_serialize(tree_ensemble_handle))
stats = session.run(
training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=2))
tree_ensemble_config.ParseFromString(serialized)
self.assertEqual(new_stamp, 2)
self.assertEqual(stats.num_trees, 0)
self.assertEqual(stats.num_layers, 0)
self.assertEqual(stats.active_tree, 0)
self.assertEqual(stats.active_layer, 0)
self.assertEqual(stats.attempted_trees, 1)
self.assertEqual(stats.attempted_layers, 2)
self.assertProtoEquals("""
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
  def testGrowEnsemblePostPrunePartial(self):
    """Test growing an ensemble with post-pruning.

    Layer 1: both handlers offer negative gains; under POST_PRUNE the best
    (least negative) split is still applied and pruning is deferred.
    Layer 2: one partition gains positively, so only the negative-gain branch
    is pruned and the tree is finalized at max_depth=2.
    """
    with self.test_session() as session:
      # Create empty ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = _gen_learner_config(
          num_classes=2,
          l1_reg=0,
          l2_reg=0,
          tree_complexity=0,
          max_depth=2,
          min_node_weight=0,
          pruning_mode=learner_pb2.LearnerConfig.POST_PRUNE,
          growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
      # Prepare handler inputs.
      # Both gains are negative; handler 2 has the larger (less negative) gain.
      handler1_partitions = np.array([0], dtype=np.int32)
      handler1_gains = np.array([-1.3], dtype=np.float32)
      handler1_split = [_gen_categorical_split_info(0, 7, 0.013, 0.0143)]
      handler2_partitions = np.array([0], dtype=np.int32)
      handler2_gains = np.array([-0.2], dtype=np.float32)
      handler2_split = [_gen_dense_split_info(0, 0.33, 0.01, 0.0143)]
      # Grow tree ensemble.
      grow_op = training_ops.grow_tree_ensemble(
          tree_ensemble_handle,
          stamp_token=0,
          next_stamp_token=1,
          learning_rate=0.1,
          partition_ids=[handler1_partitions, handler2_partitions],
          gains=[handler1_gains, handler2_gains],
          splits=[handler1_split, handler2_split],
          learner_config=learner_config,
          dropout_seed=123,
          center_bias=True)
      session.run(grow_op)
      # Expect the split from handler 2 to be chosen despite the negative gain.
      new_stamp, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
      tree_ensemble_config.ParseFromString(serialized)
      expected_result = """
      trees {
        nodes {
          dense_float_binary_split {
            threshold: 0.33
            left_id: 1
            right_id: 2
          }
          node_metadata {
            gain: -0.2
            original_leaf {
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 0.01
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 0.0143
            }
          }
        }
      }
      tree_weights: 0.1
      tree_metadata {
        num_tree_weight_updates: 1
        num_layers_grown: 1
      }
      growing_metadata {
        num_trees_attempted: 1
        num_layers_attempted: 1
      }
      """
      self.assertEqual(new_stamp, 1)
      self.assertEqual(stats.num_trees, 0)
      self.assertEqual(stats.num_layers, 1)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 1)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 1)
      self.assertProtoEquals(expected_result, tree_ensemble_config)
      # Prepare handler inputs for second layer.
      # Note that partition 1 gain is negative and partition 2 gain is positive.
      handler1_partitions = np.array([1, 2], dtype=np.int32)
      handler1_gains = np.array([-0.2, 0.5], dtype=np.float32)
      handler1_split = [
          _gen_categorical_split_info(3, 7, 0.07, 0.083),
          _gen_categorical_split_info(3, 5, 0.041, 0.064)
      ]
      # Grow tree ensemble.
      grow_op = training_ops.grow_tree_ensemble(
          tree_ensemble_handle,
          stamp_token=1,
          next_stamp_token=2,
          learning_rate=0.1,
          partition_ids=[handler1_partitions],
          gains=[handler1_gains],
          splits=[handler1_split],
          learner_config=learner_config,
          dropout_seed=123,
          center_bias=True)
      session.run(grow_op)
      # Expect the negative gain split of partition 1 to be pruned and the
      # positive gain split of partition 2 to be retained.
      new_stamp, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=2))
      tree_ensemble_config.ParseFromString(serialized)
      expected_result = """
      trees {
        nodes {
          dense_float_binary_split {
            threshold: 0.33
            left_id: 1
            right_id: 2
          }
          node_metadata {
            gain: -0.2
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 0.01
            }
          }
        }
        nodes {
          categorical_id_binary_split {
            feature_column: 3
            feature_id: 5
            left_id: 3
            right_id: 4
          }
          node_metadata {
            gain: 0.5
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 0.041
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 0.064
            }
          }
        }
      }
      tree_weights: 0.1
      tree_metadata {
        num_tree_weight_updates: 1
        num_layers_grown: 2
        is_finalized: true
      }
      growing_metadata {
        num_trees_attempted: 1
        num_layers_attempted: 2
      }
      """
      self.assertEqual(new_stamp, 2)
      self.assertEqual(stats.num_trees, 1)
      self.assertEqual(stats.num_layers, 2)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 2)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 2)
      self.assertProtoEquals(expected_result, tree_ensemble_config)
  def testGrowEnsembleTreeLayerByLayer(self):
    """Test growing an existing ensemble with the last tree not finalized.

    LAYER_BY_LAYER growing adds one layer to the active tree per step; with
    max_depth=3 and only 2 layers grown, the tree must stay unfinalized and
    dropout (probability 1.0) must have no effect.
    """
    with self.test_session() as session:
      # Create existing ensemble with one root split
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      text_format.Merge("""
      trees {
        nodes {
          categorical_id_binary_split {
            feature_id: 4
            left_id: 1
            right_id: 2
          }
          node_metadata {
            gain: 7.62
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 7.143
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: -4.375
            }
          }
        }
      }
      tree_weights: 0.10000000149
      tree_metadata {
        num_tree_weight_updates: 1
        num_layers_grown: 1
      }
      growing_metadata {
        num_trees_attempted: 1
        num_layers_attempted: 1
      }
      """, tree_ensemble_config)
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = _gen_learner_config(
          num_classes=2,
          l1_reg=0,
          l2_reg=0,
          tree_complexity=0,
          max_depth=3,
          min_node_weight=0,
          pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
          growing_mode=learner_pb2.LearnerConfig.LAYER_BY_LAYER,
          # Dropout will have no effect, since the tree will not be fully grown.
          dropout_probability=1.0)
      # Prepare handler inputs.
      # Handler 1 only has a candidate for partition 1, handler 2 has candidates
      # for both partitions and handler 3 only has a candidate for partition 2.
      handler1_partitions = np.array([1], dtype=np.int32)
      handler1_gains = np.array([1.4], dtype=np.float32)
      handler1_split = [_gen_dense_split_info(0, 0.21, -6.0, 1.65)]
      handler2_partitions = np.array([1, 2], dtype=np.int32)
      handler2_gains = np.array([0.63, 2.7], dtype=np.float32)
      handler2_split = [
          _gen_dense_split_info(0, 0.23, -0.6, 0.24),
          _gen_categorical_split_info(1, 7, -1.5, 2.3)
      ]
      handler3_partitions = np.array([2], dtype=np.int32)
      handler3_gains = np.array([1.7], dtype=np.float32)
      handler3_split = [_gen_categorical_split_info(0, 3, -0.75, 1.93)]
      # Grow tree ensemble layer by layer.
      grow_op = training_ops.grow_tree_ensemble(
          tree_ensemble_handle,
          stamp_token=0,
          next_stamp_token=1,
          learning_rate=0.1,
          partition_ids=[
              handler1_partitions, handler2_partitions, handler3_partitions
          ],
          gains=[handler1_gains, handler2_gains, handler3_gains],
          splits=[handler1_split, handler2_split, handler3_split],
          learner_config=learner_config,
          dropout_seed=123,
          center_bias=True)
      session.run(grow_op)
      # Expect the split for partition 1 to be chosen from handler 1 and
      # the split for partition 2 to be chosen from handler 2.
      # The grown tree should not be finalized as max tree depth is 3 and
      # it's only grown 2 layers.
      # The partition 1 split weights get added to original leaf weight 7.143.
      # The partition 2 split weights get added to original leaf weight -4.375.
      new_stamp, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
      tree_ensemble_config.ParseFromString(serialized)
      expected_result = """
      trees {
        nodes {
          categorical_id_binary_split {
            feature_id: 4
            left_id: 1
            right_id: 2
          }
          node_metadata {
            gain: 7.62
          }
        }
        nodes {
          dense_float_binary_split {
            threshold: 0.21
            left_id: 3
            right_id: 4
          }
          node_metadata {
            gain: 1.4
          }
        }
        nodes {
          categorical_id_binary_split {
            feature_column: 1
            feature_id: 7
            left_id: 5
            right_id: 6
          }
          node_metadata {
            gain: 2.7
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 1.143
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 8.793
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: -5.875
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: -2.075
            }
          }
        }
      }
      tree_weights: 0.1
      tree_metadata {
        num_tree_weight_updates: 1
        num_layers_grown: 2
      }
      growing_metadata {
        num_trees_attempted: 1
        num_layers_attempted: 2
      }
      """
      self.assertEqual(new_stamp, 1)
      self.assertEqual(stats.num_trees, 0)
      self.assertEqual(stats.num_layers, 2)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 2)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 2)
      self.assertProtoEquals(expected_result, tree_ensemble_config)
  def testGrowExistingEnsembleTreeFinalizedWithDropout(self):
    """Test growing an existing ensemble with the last tree finalized.

    With dropout_probability=1.0 both non-bias trees are dropped during this
    step, so after growing, the dropped trees' weights are redistributed
    (0.5 each) while the bias tree's weight is untouched.
    """
    with self.test_session() as session:
      # Create existing ensemble with one root split and one bias tree.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      text_format.Merge("""
      trees {
        nodes {
          leaf {
            vector {
              value: -0.32
              value: 0.28
            }
          }
        }
      }
      trees {
        nodes {
          categorical_id_binary_split {
            feature_column: 3
            feature_id: 7
            left_id: 1
            right_id: 2
          }
          node_metadata {
            gain: 1.3
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 2.3
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: -0.9
            }
          }
        }
      }
      tree_weights: 0.7
      tree_weights: 1
      tree_metadata {
        num_tree_weight_updates: 1
        num_layers_grown: 1
        is_finalized: true
      }
      tree_metadata {
        num_tree_weight_updates: 5
        num_layers_grown: 1
        is_finalized: true
      }
      growing_metadata {
        num_trees_attempted: 2
        num_layers_attempted: 2
      }
      """, tree_ensemble_config)
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = _gen_learner_config(
          num_classes=2,
          l1_reg=0,
          l2_reg=0,
          tree_complexity=0,
          max_depth=1,
          min_node_weight=0,
          pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
          growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
          dropout_probability=1.0)
      # Prepare handler inputs.
      handler1_partitions = np.array([0], dtype=np.int32)
      handler1_gains = np.array([7.62], dtype=np.float32)
      handler1_split = [_gen_dense_split_info(5, 0.52, -4.375, 7.143)]
      handler2_partitions = np.array([0], dtype=np.int32)
      handler2_gains = np.array([0.63], dtype=np.float32)
      handler2_split = [_gen_dense_split_info(2, 0.23, -0.6, 0.24)]
      handler3_partitions = np.array([0], dtype=np.int32)
      handler3_gains = np.array([7.62], dtype=np.float32)
      handler3_split = [_gen_categorical_split_info(8, 7, -4.375, 7.143)]
      # Grow tree ensemble.
      grow_op = training_ops.grow_tree_ensemble(
          tree_ensemble_handle,
          stamp_token=0,
          next_stamp_token=1,
          learning_rate=1,
          partition_ids=[
              handler1_partitions, handler2_partitions, handler3_partitions
          ],
          gains=[handler1_gains, handler2_gains, handler3_gains],
          splits=[handler1_split, handler2_split, handler3_split],
          learner_config=learner_config,
          dropout_seed=123,
          center_bias=True)
      session.run(grow_op)
      # Expect a new tree to be added with the split from handler 1.
      _, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      tree_ensemble_config.ParseFromString(serialized)
      self.assertEqual(3, len(tree_ensemble_config.trees))
      # Both trees got 0.5 as weights, bias tree is untouched.
      self.assertAllClose([0.7, 0.5, 0.5], tree_ensemble_config.tree_weights)
      self.assertEqual(
          1, tree_ensemble_config.tree_metadata[0].num_tree_weight_updates)
      self.assertEqual(
          6, tree_ensemble_config.tree_metadata[1].num_tree_weight_updates)
      self.assertEqual(
          2, tree_ensemble_config.tree_metadata[2].num_tree_weight_updates)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
sbellem/django | django/core/management/commands/dumpdata.py | 305 | 8545 | from collections import OrderedDict
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, router
class Command(BaseCommand):
    # Help text shown by `manage.py help dumpdata`.
    help = ("Output the contents of the database as a fixture of the given "
            "format (using each model's default manager unless --all is "
            "specified).")

    def add_arguments(self, parser):
        """Register dumpdata's command-line options on *parser*."""
        parser.add_argument('args', metavar='app_label[.ModelName]', nargs='*',
            help='Restricts dumped data to the specified app_label or app_label.ModelName.')
        parser.add_argument('--format', default='json', dest='format',
            help='Specifies the output serialization format for fixtures.')
        parser.add_argument('--indent', default=None, dest='indent', type=int,
            help='Specifies the indent level to use when pretty-printing output.')
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a specific database to dump fixtures from. '
                 'Defaults to the "default" database.')
        parser.add_argument('-e', '--exclude', dest='exclude', action='append', default=[],
            help='An app_label or app_label.ModelName to exclude '
                 '(use multiple --exclude to exclude multiple apps/models).')
        parser.add_argument('--natural-foreign', action='store_true', dest='use_natural_foreign_keys', default=False,
            help='Use natural foreign keys if they are available.')
        parser.add_argument('--natural-primary', action='store_true', dest='use_natural_primary_keys', default=False,
            help='Use natural primary keys if they are available.')
        parser.add_argument('-a', '--all', action='store_true', dest='use_base_manager', default=False,
            help="Use Django's base manager to dump all models stored in the database, "
                 "including those that would otherwise be filtered or modified by a custom manager.")
        parser.add_argument('--pks', dest='primary_keys',
            help="Only dump objects with given primary keys. "
                 "Accepts a comma separated list of keys. "
                 "This option will only work when you specify one model.")
        parser.add_argument('-o', '--output', default=None, dest='output',
            help='Specifies file to which the output is written.')

    def handle(self, *app_labels, **options):
        """Serialize the selected models to the chosen output.

        Builds the app/model selection from positional args and --exclude,
        validates the serializer format, then streams objects lazily via an
        inner generator so large databases are not loaded into memory at once.
        Raises CommandError on invalid apps/models/format or serialization
        failure.
        """
        format = options.get('format')
        indent = options.get('indent')
        using = options.get('database')
        excludes = options.get('exclude')
        output = options.get('output')
        show_traceback = options.get('traceback')
        use_natural_foreign_keys = options.get('use_natural_foreign_keys')
        use_natural_primary_keys = options.get('use_natural_primary_keys')
        use_base_manager = options.get('use_base_manager')
        pks = options.get('primary_keys')

        # --pks is a comma-separated list; empty list means "all objects".
        if pks:
            primary_keys = pks.split(',')
        else:
            primary_keys = []

        # Split --exclude entries into whole apps vs. single models.
        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                try:
                    model = apps.get_model(exclude)
                except LookupError:
                    raise CommandError('Unknown model in excludes: %s' % exclude)
                excluded_models.add(model)
            else:
                try:
                    app_config = apps.get_app_config(exclude)
                except LookupError as e:
                    raise CommandError(str(e))
                excluded_apps.add(app_config)

        # app_list maps app_config -> list of models, or None for "all models".
        if len(app_labels) == 0:
            if primary_keys:
                raise CommandError("You can only use --pks option with one model")
            app_list = OrderedDict((app_config, None)
                for app_config in apps.get_app_configs()
                if app_config.models_module is not None and app_config not in excluded_apps)
        else:
            if len(app_labels) > 1 and primary_keys:
                raise CommandError("You can only use --pks option with one model")
            app_list = OrderedDict()
            for label in app_labels:
                try:
                    app_label, model_label = label.split('.')
                    try:
                        app_config = apps.get_app_config(app_label)
                    except LookupError as e:
                        raise CommandError(str(e))
                    if app_config.models_module is None or app_config in excluded_apps:
                        continue
                    try:
                        model = app_config.get_model(model_label)
                    except LookupError:
                        raise CommandError("Unknown model: %s.%s" % (app_label, model_label))

                    app_list_value = app_list.setdefault(app_config, [])

                    # We may have previously seen a "all-models" request for
                    # this app (no model qualifier was given). In this case
                    # there is no need adding specific models to the list.
                    if app_list_value is not None:
                        if model not in app_list_value:
                            app_list_value.append(model)
                except ValueError:
                    # label had no '.', so it is just an app label.
                    if primary_keys:
                        raise CommandError("You can only use --pks option with one model")
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app_config = apps.get_app_config(app_label)
                    except LookupError as e:
                        raise CommandError(str(e))
                    if app_config.models_module is None or app_config in excluded_apps:
                        continue
                    app_list[app_config] = None

        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        # NOTE: non-public serializers are deliberately rejected here as well.
        if format not in serializers.get_public_serializer_formats():
            try:
                serializers.get_serializer(format)
            except serializers.SerializerDoesNotExist:
                pass
            raise CommandError("Unknown serialization format: %s" % format)

        def get_objects(count_only=False):
            """
            Collate the objects to be serialized. If count_only is True, just
            count the number of objects to be serialized.
            """
            # Dependency-sorted so fixtures load cleanly (FKs before users).
            for model in serializers.sort_dependencies(app_list.items()):
                if model in excluded_models:
                    continue
                if not model._meta.proxy and router.allow_migrate_model(using, model):
                    if use_base_manager:
                        objects = model._base_manager
                    else:
                        objects = model._default_manager

                    queryset = objects.using(using).order_by(model._meta.pk.name)
                    if primary_keys:
                        queryset = queryset.filter(pk__in=primary_keys)
                    if count_only:
                        yield queryset.order_by().count()
                    else:
                        for obj in queryset.iterator():
                            yield obj

        try:
            self.stdout.ending = None
            progress_output = None
            object_count = 0
            # If dumpdata is outputting to stdout, there is no way to display progress
            if (output and self.stdout.isatty() and options['verbosity'] > 0):
                progress_output = self.stdout
                object_count = sum(get_objects(count_only=True))
            stream = open(output, 'w') if output else None
            try:
                serializers.serialize(format, get_objects(), indent=indent,
                        use_natural_foreign_keys=use_natural_foreign_keys,
                        use_natural_primary_keys=use_natural_primary_keys,
                        stream=stream or self.stdout, progress_output=progress_output,
                        object_count=object_count)
            finally:
                if stream:
                    stream.close()
        except Exception as e:
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
| bsd-3-clause |
oscardagrach/linux | Documentation/sphinx/load_config.py | 456 | 1333 | # -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
import os
import sys
from sphinx.util.pycompat import execfile_
# ------------------------------------------------------------------------------
def loadConfig(namespace):
# ------------------------------------------------------------------------------
    u"""Load an additional configuration file into *namespace*.

    The name of the configuration file is taken from the environment
    ``SPHINX_CONF``. The external configuration file extends (or overwrites) the
    configuration values from the origin ``conf.py``. With this you are able to
    maintain *build themes*. """

    config_file = os.environ.get("SPHINX_CONF", None)
    # Nothing to do when no overlay is requested.
    if config_file is None:
        return
    # Nothing to do when the overlay *is* the origin conf.py itself.
    if os.path.normpath(namespace["__file__"]) == os.path.normpath(config_file):
        return

    config_file = os.path.abspath(config_file)
    if not os.path.isfile(config_file):
        sys.stderr.write("WARNING: additional sphinx-config not found: %s\n" % config_file)
        return

    sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
    # Execute the overlay against a copy so a failing exec can't corrupt
    # the caller's namespace; merge the result back afterwards.
    overlay = namespace.copy()
    overlay['__file__'] = config_file
    execfile_(config_file, overlay)
    del overlay['__file__']
    namespace.update(overlay)
| gpl-2.0 |
alexandrucoman/vbox-neutron-agent | neutron/plugins/oneconvergence/lib/plugin_helper.py | 9 | 6793 | # Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Library to talk to NVSD controller."""
import httplib
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
from six.moves.urllib import parse
from neutron.i18n import _LE, _LW
import neutron.plugins.oneconvergence.lib.exception as exception
LOG = logging.getLogger(__name__)
def initialize_plugin_helper():
    """Create and return a fresh NVSD controller helper."""
    return NVSDController()
class NVSDController(object):
    """Encapsulates the NVSD Controller details.

    Holds connection settings (from the [nvsd] oslo.config section), a pooled
    requests.Session, and the current auth token; (re-)logs in on demand.
    """

    def __init__(self):
        self._host = cfg.CONF.nvsd.nvsd_ip
        self._port = cfg.CONF.nvsd.nvsd_port
        self._user = cfg.CONF.nvsd.nvsd_user
        self._password = cfg.CONF.nvsd.nvsd_passwd
        self._retries = cfg.CONF.nvsd.nvsd_retries
        self._request_timeout = float(cfg.CONF.nvsd.request_timeout)
        self.api_url = 'http://' + self._host + ':' + str(self._port)

        # Session provides connection pooling across requests.
        self.pool = requests.Session()

        self.auth_token = None

    def do_request(self, method, url=None, headers=None, data=None):
        # Single low-level HTTP call through the pooled session.
        response = self.pool.request(method, url=url,
                                     headers=headers, data=data,
                                     timeout=self._request_timeout)
        return response

    def login(self):
        """Login to NVSD Controller.

        Retries up to nvsd_retries times (0 means retry forever), sleeping
        one second between attempts; stores session_uuid in self.auth_token
        on success.
        """
        headers = {"Content-Type": "application/json"}
        login_url = parse.urljoin(self.api_url,
                                  "/pluginhandler/ocplugin/authmgmt/login")

        data = jsonutils.dumps({"user_name": self._user,
                                "passwd": self._password})

        attempts = 0
        while True:
            # attempts cycles back to 0 when _retries == 0 (infinite retry).
            if attempts < self._retries:
                attempts += 1
            elif self._retries == 0:
                attempts = 0
            else:
                msg = _("Unable to connect to NVSD controller. Exiting after "
                        "%(retries)s attempts") % {'retries': self._retries}
                LOG.error(msg)
                raise exception.ServerException(reason=msg)
            try:
                response = self.do_request("POST", url=login_url,
                                           headers=headers, data=data)
                break
            except Exception as e:
                LOG.error(_LE("Login Failed: %s"), e)
                LOG.error(_LE("Unable to establish connection"
                              " with Controller %s"), self.api_url)
                LOG.error(_LE("Retrying after 1 second..."))
                time.sleep(1)

        if response.status_code == requests.codes.ok:
            LOG.debug("Login Successful %(uri)s "
                      "%(status)s", {'uri': self.api_url,
                                     'status': response.status_code})
            self.auth_token = jsonutils.loads(response.content)["session_uuid"]
            LOG.debug("AuthToken = %s", self.auth_token)
        else:
            # NOTE(review): login failure is only logged; callers see
            # auth_token left as None.
            LOG.error(_LE("login failed"))

        return

    def request(self, method, url, body="", content_type="application/json"):
        """Issue a request to NVSD controller.

        Logs in first if there is no token; maps controller error statuses to
        plugin exceptions via self.error_codes. Returns the response (with
        .body set to its content) on success, or None on unexpected statuses.
        """
        if self.auth_token is None:
            LOG.warning(_LW("No Token, Re-login"))
            self.login()

        headers = {"Content-Type": content_type}

        uri = parse.urljoin(url, "?authToken=%s" % self.auth_token)

        url = parse.urljoin(self.api_url, uri)

        request_ok = False
        response = None

        try:
            response = self.do_request(method, url=url,
                                       headers=headers, data=body)

            LOG.debug("request: %(method)s %(uri)s successful",
                      {'method': method, 'uri': self.api_url + uri})
            request_ok = True
        except httplib.IncompleteRead as e:
            # Treat a partially-read body as a usable response.
            response = e.partial
            request_ok = True
        except Exception as e:
            LOG.error(_LE("request: Request failed from "
                          "Controller side :%s"), e)

        if response is None:
            # Timeout.
            LOG.error(_LE("Response is Null, Request timed out: %(method)s to "
                          "%(uri)s"), {'method': method, 'uri': uri})
            # Drop the token so the next call triggers a fresh login.
            self.auth_token = None
            raise exception.RequestTimeout()

        status = response.status_code
        if status == requests.codes.unauthorized:
            self.auth_token = None
            # Raise an exception to inform that the request failed.
            raise exception.UnAuthorizedException()

        if status in self.error_codes:
            LOG.error(_LE("Request %(method)s %(uri)s body = %(body)s failed "
                          "with status %(status)s. Reason: %(reason)s)"),
                      {'method': method,
                       'uri': uri, 'body': body,
                       'status': status,
                       'reason': response.reason})
            raise self.error_codes[status]()
        elif status not in (requests.codes.ok, requests.codes.created,
                            requests.codes.no_content):
            LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
                          "%(status)d"), {'method': method, 'url': url,
                                          'status': status})
            return

        if not request_ok:
            LOG.error(_LE("Request failed from Controller side with "
                          "Status=%s"), status)
            raise exception.ServerException()
        else:
            LOG.debug("Success: %(method)s %(url)s status=%(status)s",
                      {'method': method, 'url': self.api_url + uri,
                       'status': status})

        response.body = response.content
        return response

    # Map of controller HTTP status codes to the plugin exception raised.
    error_codes = {
        404: exception.NotFoundException,
        409: exception.BadRequestException,
        500: exception.InternalServerError,
        503: exception.ServerException,
        403: exception.ForbiddenException,
        301: exception.NVSDAPIException,
        307: exception.NVSDAPIException,
        400: exception.NVSDAPIException,
    }
| apache-2.0 |
itdc/sublimetext-itdchelper | itdchelper/asanalib/requests/cookies.py | 34 | 13686 | # -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import collections
from .compat import cookielib, urlparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
    """Wraps a `requests.Request` to mimic a `urllib2.Request`.

    The code in `cookielib.CookieJar` expects this interface in order to correctly
    manage cookie policies, i.e., determine whether a cookie can be set, given the
    domains of the request and the cookie.

    The original request object is read-only. The client is responsible for collecting
    the new headers via `get_new_headers()` and interpreting them appropriately. You
    probably want `get_cookie_header`, defined below.
    """

    def __init__(self, request):
        # The wrapped, read-only requests.Request.
        self._r = request
        # Headers cookielib asks us to add; exposed via get_new_headers().
        self._new_headers = {}
        # URL scheme of the request, read by cookielib via get_type().
        self.type = urlparse(self._r.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._r.url).netloc

    def get_origin_req_host(self):
        # The request is treated as its own origin.
        return self.get_host()

    def get_full_url(self):
        return self._r.url

    def is_unverifiable(self):
        # Unconditionally reported as unverifiable.
        return True

    def has_header(self, name):
        return any(name in headers
                   for headers in (self._r.headers, self._new_headers))

    def get_header(self, name, default=None):
        # Original request headers win over headers added by cookielib.
        if name in self._r.headers:
            return self._r.headers[name]
        return self._new_headers.get(name, default)

    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")

    def add_unredirected_header(self, name, value):
        self._new_headers[name] = value

    def get_new_headers(self):
        return self._new_headers

    @property
    def unverifiable(self):
        return self.is_unverifiable()
class MockResponse(object):
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `cookielib` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        return self._headers

    def getheaders(self, name):
        # Bug fix: the original called getheaders() but dropped the result,
        # so this method always returned None.
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    # Robustness fix (mirrors later requests releases): responses that were
    # not obtained from an httplib layer (e.g. cached or mocked responses)
    # have no usable _original_response; there is nothing to extract then.
    if not (hasattr(response, '_original_response') and
            response._original_response):
        return
    # the _original_response field is the wrapped httplib.HTTPResponse object,
    req = MockRequest(request)
    # pull out the HTTPMessage with the headers and put it in the mock:
    res = MockResponse(response._original_response.msg)
    jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
    """Produce an appropriate Cookie header string to be sent with `request`, or None.

    Wraps *request* in a MockRequest so cookielib's policy logic can decide
    which cookies apply, then returns whatever Cookie header it added.
    """
    r = MockRequest(request)
    jar.add_cookie_header(r)
    return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # Collect matches first: clearing while iterating would mutate the jar
    # under the iterator.
    matches = [
        (cookie.domain, cookie.path, cookie.name)
        for cookie in cookiejar
        if cookie.name == name
        and (domain is None or domain == cookie.domain)
        and (path is None or path == cookie.path)
    ]
    for cookie_domain, cookie_path, cookie_name in matches:
        cookiejar.clear(cookie_domain, cookie_path, cookie_name)
class CookieConflictError(RuntimeError):
    """There are two cookies that meet the criteria specified in the cookie jar.

    Use .get and .set and include domain and path args in order to be more
    specific.
    """
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Don't use the dict interface internally; it's just for compatibility with
with external client code. All `requests` code should work out of the box
with externally provided instances of CookieJar, e.g., LWPCookieJar and
FileCookieJar.
Caution: dictionary operations that are normally O(1) may be O(n).
Unlike a regular CookieJar, this class is pickleable.
"""
    def get(self, name, default=None, domain=None, path=None):
        """Dict-like get() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains. Caution: operation is O(n), not O(1)."""
        # EAFP: a missing cookie surfaces as KeyError from the lookup helper.
        try:
            return self._find_no_duplicates(name, domain, path)
        except KeyError:
            return default
    def set(self, name, value, **kwargs):
        """Dict-like set() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains.

        Returns the created Cookie, or None when *value* is None (unset)."""
        # support client code that unsets cookies by assignment of a None value:
        if value is None:
            remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
            return

        # A Morsel (stdlib Cookie object) carries its own attributes;
        # anything else is treated as a plain value.
        if isinstance(value, Morsel):
            c = morsel_to_cookie(value)
        else:
            c = create_cookie(name, value, **kwargs)
        self.set_cookie(c)
        return c
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the jar.
See values() and items()."""
keys = []
for cookie in iter(self):
keys.append(cookie.name)
return keys
def values(self):
"""Dict-like values() that returns a list of values of cookies from the jar.
See keys() and items()."""
values = []
for cookie in iter(self):
values.append(cookie.value)
return values
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the jar.
See keys() and values(). Allows client-code to call "dict(RequestsCookieJar)
and get a vanilla python dict of key value pairs."""
items = []
for cookie in iter(self):
items.append((cookie.name, cookie.value))
return items
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise."""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain old
Python dict of name-value pairs of cookies that meet the requirements."""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
    def __getitem__(self, name):
        """Dict-like __getitem__() for compatibility with client code. Throws exception
        if there are more than one cookie with name. In that case, use the more
        explicit get() method instead. Caution: operation is O(n), not O(1)."""
        # Unlike get(), a missing name raises KeyError here (dict semantics).
        return self._find_no_duplicates(name)
def __setitem__(self, name, value):
    """Dict-like __setitem__ for compatibility with client code. Throws exception
    if there is already a cookie of that name in the jar. In that case, use the more
    explicit set() method instead."""
    # delegate to the jar's set() helper (defined elsewhere in this class)
    self.set(name, value)
def __delitem__(self, name):
    """Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
    # remove_cookie_by_name is a module-level helper not visible in this chunk
    remove_cookie_by_name(self, name)
def _find(self, name, domain=None, path=None):
    """Requests uses this method internally to get cookie values. Takes as args name
    and optional domain and path. Returns a cookie.value. If there are conflicting
    cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an
    exception thrown if there are conflicting cookies."""
    for cookie in iter(self):
        if (cookie.name == name
                and (domain is None or cookie.domain == domain)
                and (path is None or cookie.path == path)):
            # first match wins when several cookies share the name
            return cookie.value

    raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
    """__get_item__ and get call _find_no_duplicates -- never used in Requests internally.
    Takes as args name and optional domain and path. Returns a cookie.value.
    Throws KeyError if cookie is not found and CookieConflictError if there are
    multiple cookies that match name and optionally domain and path."""
    toReturn = None
    for cookie in iter(self):
        if cookie.name == name:
            if domain is None or cookie.domain == domain:
                if path is None or cookie.path == path:
                    if toReturn is not None:
                        # multiple cookies meet the passed-in criteria
                        raise CookieConflictError('There are multiple cookies with name, %r' % (name))
                    # we will eventually return this as long as no cookie conflict
                    toReturn = cookie.value

    # Bug fix: test against None rather than truthiness -- a matching cookie
    # whose value is falsy (e.g. the empty string) previously fell through
    # and raised KeyError even though it exists in the jar.
    if toReturn is not None:
        return toReturn
    raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
    """Unlike a normal CookieJar, this class is pickleable."""
    state = dict(self.__dict__)
    # drop the RLock, which cannot be pickled
    state.pop('_cookies_lock')
    return state
def __setstate__(self, state):
    """Unlike a normal CookieJar, this class is pickleable."""
    self.__dict__.update(state)
    # recreate the lock dropped by __getstate__ if it was not restored
    if '_cookies_lock' not in self.__dict__:
        self._cookies_lock = threading.RLock()
def copy(self):
    """This is not implemented. Calling this will throw an exception."""
    raise NotImplementedError()
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    result = {
        'version': 0,
        'name': name,
        'value': value,
        'port': None,
        'domain': '',
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rest': {'HttpOnly': None},
        'rfc2109': False,
    }

    # reject any keyword that does not correspond to a Cookie attribute
    badargs = set(kwargs) - set(result)
    if badargs:
        raise TypeError(
            'create_cookie() got unexpected keyword arguments: %s'
            % list(badargs))

    result.update(kwargs)
    # derive the *_specified flags from the values actually supplied
    result['port_specified'] = bool(result['port'])
    result['domain_specified'] = bool(result['domain'])
    result['domain_initial_dot'] = result['domain'].startswith('.')
    result['path_specified'] = bool(result['path'])

    return cookielib.Cookie(**result)
def morsel_to_cookie(morsel):
    """Convert a Morsel object into a Cookie containing the one k/v pair.

    Bug fix: the previous version also passed port_specified,
    domain_specified, domain_initial_dot and path_specified, which
    create_cookie() rejects via its badargs check, so every call raised
    TypeError. create_cookie() derives those four flags from
    port/domain/path with exactly the same values, so they are simply
    omitted here.
    """
    c = create_cookie(
        name=morsel.key,
        value=morsel.value,
        version=morsel['version'] or 0,
        port=None,
        domain=morsel['domain'],
        path=morsel['path'],
        secure=bool(morsel['secure']),
        # NOTE(review): 'max-age' is a lifetime in seconds while 'expires'
        # is an absolute date; forwarding either one unchanged as `expires`
        # looks suspect for the max-age case -- confirm intended semantics.
        expires=morsel['max-age'] or morsel['expires'],
        discard=False,
        comment=morsel['comment'],
        # NOTE(review): bool(comment) as comment_url looks like a typo --
        # a URL string would be expected here; verify against callers.
        comment_url=bool(morsel['comment']),
        rest={'HttpOnly': morsel['httponly']},
        rfc2109=False,
    )
    return c
def cookiejar_from_dict(cookie_dict, cookiejar=None):
    """Returns a CookieJar from a key/value dictionary.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    if cookiejar is None:
        cookiejar = RequestsCookieJar()

    if cookie_dict is not None:
        for name, value in cookie_dict.items():
            cookiejar.set_cookie(create_cookie(name, value))
    return cookiejar
| mit |
TalShafir/ansible | lib/ansible/plugins/action/win_updates.py | 21 | 12639 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.yaml.objects import AnsibleUnicode
from ansible.plugins.action import ActionBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
    """Action plugin for win_updates: runs the module on the target and,
    when requested, reboots the host and re-runs it until no updates remain."""

    # default number of seconds to wait for the host to return after a reboot
    DEFAULT_REBOOT_TIMEOUT = 1200
def _validate_categories(self, category_names):
    """Raise AnsibleError if any entry is not a known WUA category name."""
    valid_categories = [
        'Application',
        'Connectors',
        'CriticalUpdates',
        'DefinitionUpdates',
        'DeveloperKits',
        'FeaturePacks',
        'Guidance',
        'SecurityUpdates',
        'ServicePacks',
        'Tools',
        'UpdateRollups',
        'Updates'
    ]
    # report the first unknown name, matching iteration order
    unknown = [n for n in category_names if n not in valid_categories]
    if unknown:
        raise AnsibleError("Unknown category_name %s, must be one of "
                           "(%s)" % (unknown[0], ','.join(valid_categories)))
def _run_win_updates(self, module_args, task_vars, use_task):
    """Execute the win_updates module (with become escalation as needed),
    honouring the task's async setting; returns the module result dict."""
    display.vvv("win_updates: running win_updates module")
    # async_val doubles as the wrap_async flag for module execution
    wrap_async = self._task.async_val
    result = self._execute_module_with_become(module_name='win_updates',
                                              module_args=module_args,
                                              task_vars=task_vars,
                                              wrap_async=wrap_async,
                                              use_task=use_task)
    return result
def _reboot_server(self, task_vars, reboot_timeout, use_task):
    """Reboot the remote host via win_reboot, then wait until the Windows
    Update Agent is usable again and the connection is stable.

    Raises AnsibleError if the reboot or the reconnection wait fails.
    """
    display.vvv("win_updates: rebooting remote host after update install")
    reboot_args = {
        'reboot_timeout': reboot_timeout
    }
    reboot_result = self._run_action_plugin('win_reboot', task_vars,
                                            module_args=reboot_args)
    if reboot_result.get('failed', False):
        raise AnsibleError(reboot_result['msg'])

    # only run this if the user has specified we can only use scheduled
    # tasks, the win_shell command requires become and will be skipped if
    # become isn't available to use
    if use_task:
        display.vvv("win_updates: skipping WUA is not busy check as "
                    "use_scheduled_task=True is set")
    else:
        display.vvv("win_updates: checking WUA is not busy with win_shell "
                    "command")
        # While this always returns False after a reboot it doesn't return
        # a value until Windows is actually ready and finished installing
        # updates. This needs to run with become as WUA doesn't work over
        # WinRM, ignore connection errors as another reboot can happen
        command = "(New-Object -ComObject Microsoft.Update.Session)." \
                  "CreateUpdateInstaller().IsBusy"
        shell_module_args = {
            '_raw_params': command
        }
        try:
            shell_result = self._execute_module_with_become(
                module_name='win_shell', module_args=shell_module_args,
                task_vars=task_vars, wrap_async=False, use_task=use_task
            )
            display.vvv("win_updates: shell wait results: %s"
                        % json.dumps(shell_result))
        except Exception as exc:
            # best effort: a mid-check reboot can drop the connection
            display.debug("win_updates: Fatal error when running shell "
                          "command, attempting to recover: %s" % to_text(exc))

    display.vvv("win_updates: ensure the connection is up and running")
    # in case Windows needs to reboot again after the updates, we wait for
    # the connection to be stable again
    wait_for_result = self._run_action_plugin('wait_for_connection',
                                              task_vars)
    if wait_for_result.get('failed', False):
        raise AnsibleError(wait_for_result['msg'])
def _run_action_plugin(self, plugin_name, task_vars, module_args=None):
    """Run another action plugin (e.g. win_reboot, wait_for_connection)
    in the context of the current task and return its result dict."""
    # Create new task object and reset the args
    new_task = self._task.copy()
    new_task.args = {}
    if module_args is not None:
        for key, value in module_args.items():
            new_task.args[key] = value

    # run the action plugin and return the results
    action = self._shared_loader_obj.action_loader.get(
        plugin_name,
        task=new_task,
        connection=self._connection,
        play_context=self._play_context,
        loader=self._loader,
        templar=self._templar,
        shared_loader_obj=self._shared_loader_obj
    )

    return action.run(task_vars=task_vars)
def _merge_dict(self, original, new):
    """Return a new dict with ``new``'s entries layered over ``original``;
    neither input is modified."""
    merged = dict(original)
    merged.update(new)
    return merged
def _execute_module_with_become(self, module_name, module_args, task_vars,
                                wrap_async, use_task):
    """Execute ``module_name``, temporarily forcing become=runas as the
    SYSTEM user unless ``use_task`` is set; the caller's become settings
    are always restored, even if module execution raises.
    """
    # save the current become state so it can be restored afterwards
    orig_become = self._play_context.become
    orig_become_method = self._play_context.become_method
    # Bug fix: the original assignment ended with a stray line-continuation
    # backslash, which fused it with the following `if` statement and made
    # the file a syntax error; the backslash is removed.
    orig_become_user = self._play_context.become_user

    if not use_task:
        if orig_become is None or orig_become is False:
            self._play_context.become = True
        if orig_become_method != 'runas':
            self._play_context.become_method = 'runas'
        if orig_become_user is None or orig_become_user == 'root':
            self._play_context.become_user = 'SYSTEM'

    try:
        module_res = self._execute_module(module_name=module_name,
                                          module_args=module_args,
                                          task_vars=task_vars,
                                          wrap_async=wrap_async)
    finally:
        # always restore the caller's become configuration
        self._play_context.become = orig_become
        self._play_context.become_method = orig_become_method
        self._play_context.become_user = orig_become_user

    return module_res
def run(self, tmp=None, task_vars=None):
    """Validate task options, run win_updates, and (when reboot=yes)
    keep rebooting and re-running until no updates or reboots remain,
    aggregating the per-pass results."""
    self._supports_check_mode = True
    self._supports_async = True

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    category_names = self._task.args.get('category_names', [
        'CriticalUpdates',
        'SecurityUpdates',
        'UpdateRollups',
    ])
    # allow a comma-separated string as well as a list
    if isinstance(category_names, AnsibleUnicode):
        category_names = [cat.strip() for cat in category_names.split(",")]

    state = self._task.args.get('state', 'installed')
    reboot = self._task.args.get('reboot', False)
    reboot_timeout = self._task.args.get('reboot_timeout',
                                         self.DEFAULT_REBOOT_TIMEOUT)
    use_task = boolean(self._task.args.get('use_scheduled_task', False),
                       strict=False)

    # Validate the options
    try:
        self._validate_categories(category_names)
    except AnsibleError as exc:
        result['failed'] = True
        result['msg'] = to_text(exc)
        return result

    if state not in ['installed', 'searched']:
        result['failed'] = True
        result['msg'] = "state must be either installed or searched"
        return result

    try:
        reboot = boolean(reboot)
    except TypeError as exc:
        result['failed'] = True
        result['msg'] = "cannot parse reboot as a boolean: %s" % to_text(exc)
        return result

    if not isinstance(reboot_timeout, int):
        result['failed'] = True
        result['msg'] = "reboot_timeout must be an integer"
        return result

    # async cannot be combined with automatic reboots
    if reboot and self._task.async_val > 0:
        result['failed'] = True
        result['msg'] = "async is not supported for this task when " \
                        "reboot=yes"
        return result

    # Run the module
    new_module_args = self._task.args.copy()
    new_module_args.pop('reboot', None)
    new_module_args.pop('reboot_timeout', None)
    result = self._run_win_updates(new_module_args, task_vars, use_task)

    # if the module failed to run at all then changed won't be populated
    # so we just return the result as is
    # https://github.com/ansible/ansible/issues/38232
    failed = result.get('failed', False)
    if ("updates" not in result.keys() and self._task.async_val == 0) or failed:
        result['failed'] = True
        return result

    changed = result.get('changed', False)
    updates = result.get('updates', dict())
    filtered_updates = result.get('filtered_updates', dict())
    found_update_count = result.get('found_update_count', 0)
    installed_update_count = result.get('installed_update_count', 0)

    # Handle automatic reboots if the reboot flag is set
    if reboot and state == 'installed' and not \
            self._play_context.check_mode:
        previously_errored = False
        # keep cycling until nothing is pending and no reboot is required
        while result['installed_update_count'] > 0 or \
                result['found_update_count'] > 0 or \
                result['reboot_required'] is True:
            display.vvv("win_updates: check win_updates results for "
                        "automatic reboot: %s" % json.dumps(result))

            # check if the module failed, break from the loop if it
            # previously failed and return error to the user
            if result.get('failed', False):
                if previously_errored:
                    break
                previously_errored = True
            else:
                previously_errored = False

            reboot_error = None
            # check if a reboot was required before installing the updates
            if result.get('msg', '') == "A reboot is required before " \
                    "more updates can be installed":
                reboot_error = "reboot was required before more updates " \
                               "can be installed"

            if result.get('reboot_required', False):
                if reboot_error is None:
                    reboot_error = "reboot was required to finalise " \
                                   "update install"
                try:
                    changed = True
                    self._reboot_server(task_vars, reboot_timeout,
                                        use_task)
                except AnsibleError as exc:
                    result['failed'] = True
                    result['msg'] = "Failed to reboot remote host when " \
                                    "%s: %s" \
                                    % (reboot_error, to_text(exc))
                    break

            result.pop('msg', None)
            # rerun the win_updates module after the reboot is complete
            result = self._run_win_updates(new_module_args, task_vars,
                                           use_task)
            if result.get('failed', False):
                return result

            # merge this pass's findings into the running totals
            result_updates = result.get('updates', dict())
            result_filtered_updates = result.get('filtered_updates', dict())
            updates = self._merge_dict(updates, result_updates)
            filtered_updates = self._merge_dict(filtered_updates,
                                                result_filtered_updates)
            found_update_count += result.get('found_update_count', 0)
            installed_update_count += result.get('installed_update_count', 0)
            if result['changed']:
                changed = True

    # finally create the return dict based on the aggregated execution
    # values if we are not in async
    if self._task.async_val == 0:
        result['changed'] = changed
        result['updates'] = updates
        result['filtered_updates'] = filtered_updates
        result['found_update_count'] = found_update_count
        result['installed_update_count'] = installed_update_count

    return result
| gpl-3.0 |
Weil0ng/gem5 | src/mem/slicc/ast/InPortDeclAST.py | 29 | 5020 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.ast.TypeAST import TypeAST
from slicc.symbols import Func, Type, Var
class InPortDeclAST(DeclAST):
    """AST node for an in_port declaration inside a SLICC state machine."""

    def __init__(self, slicc, ident, msg_type, var_expr, pairs, statements):
        super(InPortDeclAST, self).__init__(slicc, pairs)

        self.ident = ident  # name of the in_port
        self.msg_type = msg_type  # declared message type of the port
        self.var_expr = var_expr  # expression for the backing queue/buffer
        self.statements = statements  # inline port body statements (may be None)
        self.queue_type = TypeAST(slicc, "InPort")
def __repr__(self):
    # debugging representation showing the port's identifier
    return "[InPortDecl: {0}]".format(self.ident)
def generate(self):
    """Type-check the in_port declaration and register the port, its
    trigger/stallPort helper functions and its inline code with the
    enclosing state machine's symbol table."""
    symtab = self.symtab
    void_type = symtab.find("void", Type)
    machine = symtab.state_machine
    if machine is None:
        self.error("InPort declaration not part of a machine.")

    code = self.slicc.codeFormatter()
    queue_type = self.var_expr.generate(code)
    if not queue_type.isInPort:
        self.error("The inport queue's type must have the 'inport' " + \
                   "attribute. Type '%s' does not have this attribute.",
                   queue_type)

    type = self.queue_type.type
    self.pairs["buffer_expr"] = self.var_expr
    in_port = Var(self.symtab, self.ident, self.location, type, str(code),
                  self.pairs, machine)
    symtab.newSymbol(in_port)

    # new scope for the symbols the port body may reference
    symtab.pushFrame()
    param_types = []

    # Check for Event
    type = symtab.find("Event", Type)
    if type is None:
        self.error("in_port decls require 'Event' enumeration defined")
    param_types.append(type)

    # Check for Address
    type = symtab.find("Addr", Type)
    if type is None:
        self.error("in_port decls require 'Addr' type to be defined")
    param_types.append(type)

    if machine.EntryType != None:
        param_types.append(machine.EntryType)
    if machine.TBEType != None:
        param_types.append(machine.TBEType)

    # Add the trigger method - FIXME, this is a bit dirty
    pairs = { "external" : "yes" }
    # trigger function name encodes its parameter types, e.g. trigger_Event_Addr
    trigger_func_name = "trigger"
    for param in param_types:
        trigger_func_name += "_" + param.ident
    func = Func(self.symtab, trigger_func_name, "trigger", self.location,
                void_type, param_types, [], "", pairs)
    symtab.newSymbol(func)

    # Add the stallPort method - this hacks reschedules the controller
    # for stalled messages that don't trigger events
    func = Func(self.symtab, "stallPort", "stallPort", self.location,
                void_type, [], [], "", pairs)
    symtab.newSymbol(func)

    param_types = []
    # Check for Event2
    type = symtab.find("Event", Type)
    if type is None:
        self.error("in_port decls require 'Event' enumeration")
    param_types.append(type)

    # Check for Address2
    type = symtab.find("Addr", Type)
    if type is None:
        self.error("in_port decls require 'Addr' type to be defined")
    param_types.append(type)

    if self.statements is not None:
        # render the inline port body at the expected indent level
        rcode = self.slicc.codeFormatter()
        rcode.indent()
        rcode.indent()
        self.statements.generate(rcode, None)
        in_port["c_code_in_port"] = str(rcode)
    symtab.popFrame()

    # Add port to state machine
    machine.addInPort(in_port)
| bsd-3-clause |
dmitry-sobolev/ansible | lib/ansible/plugins/action/junos.py | 5 | 5656 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import copy
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.junos import junos_argument_spec
from ansible.module_utils.six import iteritems
from ansible.plugins import connection_loader, module_loader
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
    """Action plugin for junos modules: establishes a persistent netconf or
    network_cli connection and exposes its socket to the module run."""

    def run(self, tmp=None, task_vars=None):
        """Set up the persistent connection (when the module opts in via
        USE_PERSISTENT_CONNECTION) and then delegate to the normal runner."""
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        module = module_loader._load_module_source(self._task.action, module_loader.find_plugin(self._task.action))
        if not getattr(module, 'USE_PERSISTENT_CONNECTION', False):
            # module does not use persistent connections; run it normally
            return super(ActionModule, self).run(tmp, task_vars)

        provider = self.load_provider()

        # build a dedicated play context for the persistent connection
        pc = copy.deepcopy(self._play_context)
        pc.network_os = 'junos'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr

        # junos_netconf itself runs over the CLI; everything else uses netconf
        if self._task.action == 'junos_netconf':
            pc.connection = 'network_cli'
            pc.port = provider['port'] or self._play_context.port or 22
        else:
            pc.connection = 'netconf'
            pc.port = provider['port'] or self._play_context.port or 830

        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = provider['timeout'] or self._play_context.timeout

        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

        socket_path = self._get_socket_path(pc)
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)

        if not os.path.exists(socket_path):
            # start the connection if it isn't started
            if pc.connection == 'netconf':
                rc, out, err = connection.exec_command('open_session()')
            else:
                rc, out, err = connection.exec_command('open_shell()')
            if rc != 0:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell',
                        'rc': rc}
        elif pc.connection == 'network_cli':
            # make sure we are in the right cli context which should be
            # enable mode and not config module
            rc, out, err = connection.exec_command('prompt()')
            while str(out).strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                connection.exec_command('exit')
                rc, out, err = connection.exec_command('prompt()')

        # let the module find the already-open connection
        task_vars['ansible_socket'] = socket_path

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
def _get_socket_path(self, play_context):
    """Return the persistent-connection control socket path for this
    host/port/user combination."""
    ssh = connection_loader.get('ssh', class_only=True)
    path = unfrackpath("$HOME/.ansible/pc")
    # use play_context.connection instead of play_context.port to avoid
    # collision if netconf is listening on port 22
    # NOTE(review): the comment above describes the commented-out variant
    # below; the active code keys the path on the port -- confirm which
    # behaviour is intended.
    #cp = ssh._create_control_path(play_context.remote_addr, play_context.connection, play_context.remote_user)
    cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user)
    return cp % dict(directory=path)
def load_provider(self):
    """Build the connection provider dict from the task's `provider` arg,
    filling missing keys from top-level task args, then from each option's
    declared fallback in junos_argument_spec, else None."""
    provider = self._task.args.get('provider', {})
    for key, value in iteritems(junos_argument_spec):
        if key != 'provider' and key not in provider:
            if key in self._task.args:
                provider[key] = self._task.args[key]
            elif 'fallback' in value:
                provider[key] = self._fallback(value['fallback'])
            elif key not in provider:
                provider[key] = None
    return provider
def _fallback(self, fallback):
    """Evaluate a fallback spec of the form (callable, args?, kwargs?) and
    return its result, or None when AnsibleFallbackNotFound is raised."""
    strategy = fallback[0]
    call_args = []
    call_kwargs = {}
    # a dict entry supplies keyword arguments, anything else positional ones
    for entry in fallback[1:]:
        if isinstance(entry, dict):
            call_kwargs = entry
        else:
            call_args = entry
    try:
        return strategy(*call_args, **call_kwargs)
    except AnsibleFallbackNotFound:
        pass
| gpl-3.0 |
zeehio/python-telegram-bot | telegram/document.py | 2 | 2098 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015 Leandro Toledo de Souza <leandrotoeldodesouza@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram Document"""
from telegram import PhotoSize, TelegramObject
class Document(TelegramObject):
    """This object represents a Telegram Document.

    Attributes:
        file_id (str):
        thumb (:class:`telegram.PhotoSize`):
        file_name (str):
        mime_type (str):
        file_size (int):

    Args:
        file_id (str):
        **kwargs: Arbitrary keyword arguments.

    Keyword Args:
        thumb (Optional[:class:`telegram.PhotoSize`]):
        file_name (Optional[str]):
        mime_type (Optional[str]):
        file_size (Optional[int]):
    """

    def __init__(self,
                 file_id,
                 **kwargs):
        # Required
        self.file_id = str(file_id)
        # Optionals - coerced to their documented types with safe defaults
        self.thumb = kwargs.get('thumb')
        self.file_name = str(kwargs.get('file_name', ''))
        self.mime_type = str(kwargs.get('mime_type', ''))
        self.file_size = int(kwargs.get('file_size', 0))

    @staticmethod
    def de_json(data):
        """
        Args:
            data (str):

        Returns:
            telegram.Document:
        """
        if not data:
            return None

        # NOTE(review): mutates the caller's dict in place, replacing
        # data['thumb'] with a PhotoSize instance before unpacking.
        data['thumb'] = PhotoSize.de_json(data.get('thumb'))

        return Document(**data)
| gpl-3.0 |
ZefQ/Flexget | flexget/plugins/cli/database.py | 8 | 3612 | from __future__ import unicode_literals, division, absolute_import
from flexget import options
from flexget.db_schema import reset_schema, plugin_schemas
from flexget.event import event
from flexget.logger import console
from flexget.manager import Base, Session
def do_cli(manager, options):
    """Dispatch the `database` CLI sub-command while holding the manager lock."""
    with manager.acquire_lock():
        handlers = {
            'cleanup': lambda: cleanup(manager),
            'vacuum': vacuum,
            'reset': lambda: reset(manager),
            'reset-plugin': lambda: reset_plugin(options),
        }
        handler = handlers.get(options.db_action)
        # unknown actions are silently ignored, matching argparse's choices
        if handler is not None:
            handler()
def cleanup(manager):
    """Force a full database cleanup pass and report completion."""
    manager.db_cleanup(force=True)
    console('Database cleanup complete.')
def vacuum():
    """Run sqlite VACUUM to reclaim space and defragment the database."""
    console('Running VACUUM on sqlite database, this could take a while.')
    session = Session()
    try:
        session.execute('VACUUM')
        session.commit()
    finally:
        # always release the session, even if VACUUM fails
        session.close()
    console('VACUUM complete.')
def reset(manager):
    """Drop and recreate every table, wiping the whole FlexGet database."""
    Base.metadata.drop_all(bind=manager.engine)
    Base.metadata.create_all(bind=manager.engine)
    console('The FlexGet database has been reset.')
def reset_plugin(options):
    """Reset a single plugin's schema, or list every known plugin schema
    when no plugin name was given."""
    plugin = options.reset_plugin
    if not plugin:
        # no plugin given: print a table of known plugin schemas instead
        if options.porcelain:
            console('%-20s | Ver | Tables' % 'Name')
        else:
            console('-' * 79)
            console('%-20s Ver Tables' % 'Name')
            console('-' * 79)
        for k, v in sorted(plugin_schemas.iteritems()):
            tables = ''
            line_len = 0
            for name in v['tables']:
                if options.porcelain:
                    pass
                else:
                    # wrap the table list at ~53 chars, indenting continuations
                    if line_len + len(name) + 2 >= 53:
                        tables += '\n'
                        tables += ' ' * 26
                        line_len = len(name) + 2
                    else:
                        line_len += len(name) + 2
                tables += name + ', '
            tables = tables.rstrip(', ')
            if options.porcelain:
                console('%-20s %s %-3s %s %s' % (k, '|', v['version'], '|', tables))
            else:
                console('%-20s %-2s %s' % (k, v['version'], tables))
    else:
        try:
            reset_schema(plugin)
            console('The database for `%s` has been reset.' % plugin)
        except ValueError as e:
            console('Unable to reset %s: %s' % (plugin, e.message))
@event('options.register')
def register_parser_arguments():
    """Register the `database` command and its sub-command parsers with the
    global FlexGet option parser."""
    parser = options.register_command('database', do_cli, help='utilities to manage the FlexGet database')
    subparsers = parser.add_subparsers(title='Actions', metavar='<action>', dest='db_action')
    subparsers.add_parser('cleanup', help='make all plugins clean un-needed data from the database')
    subparsers.add_parser('vacuum', help='running vacuum can increase performance and decrease database size')
    # reset is destructive, so require an explicit --sure flag
    reset_parser = subparsers.add_parser('reset', add_help=False, help='reset the entire database (DANGEROUS!)')
    reset_parser.add_argument('--sure', action='store_true', required=True,
                              help='you must use this flag to indicate you REALLY want to do this')
    reset_plugin_parser = subparsers.add_parser('reset-plugin', help='reset the database for a specific plugin')
    reset_plugin_parser.add_argument('reset_plugin', metavar='<plugin>', nargs='?',
                                     help='name of plugin to reset (if omitted, known plugins will be listed)')
    reset_plugin_parser.add_argument('--porcelain', action='store_true', help='make the output parseable')
| mit |
fbradyirl/home-assistant | tests/components/device_tracker/test_entities.py | 4 | 1970 | """Tests for device tracker entities."""
import pytest
from homeassistant.components.device_tracker.config_entry import (
BaseTrackerEntity,
ScannerEntity,
)
from homeassistant.components.device_tracker.const import (
SOURCE_TYPE_ROUTER,
ATTR_SOURCE_TYPE,
DOMAIN,
)
from homeassistant.const import STATE_HOME, STATE_NOT_HOME, ATTR_BATTERY_LEVEL
from tests.common import MockConfigEntry
async def test_scanner_entity_device_tracker(hass):
    """Test ScannerEntity based device tracker."""
    config_entry = MockConfigEntry(domain="test")
    config_entry.add_to_hass(hass)

    await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
    await hass.async_block_till_done()

    entity_id = "device_tracker.unnamed_device"
    entity_state = hass.states.get(entity_id)
    # initial state: not home, with source type and battery attributes set
    assert entity_state.attributes == {
        ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER,
        ATTR_BATTERY_LEVEL: 100,
    }
    assert entity_state.state == STATE_NOT_HOME

    entity = hass.data[DOMAIN].get_entity(entity_id)
    entity.set_connected()
    await hass.async_block_till_done()

    # after set_connected() the entity must report home
    entity_state = hass.states.get(entity_id)
    assert entity_state.state == STATE_HOME
def test_scanner_entity():
    """Test coverage for base ScannerEntity entity class."""
    entity = ScannerEntity()

    # abstract properties must raise until a subclass implements them
    with pytest.raises(NotImplementedError):
        assert entity.source_type is None
    with pytest.raises(NotImplementedError):
        assert entity.is_connected is None
    with pytest.raises(NotImplementedError):
        assert entity.state == STATE_NOT_HOME
    # battery_level has a concrete default of None
    assert entity.battery_level is None
def test_base_tracker_entity():
    """Test coverage for base BaseTrackerEntity entity class."""
    entity = BaseTrackerEntity()

    # abstract properties raise; battery_level has a concrete None default
    with pytest.raises(NotImplementedError):
        assert entity.source_type is None

    assert entity.battery_level is None

    with pytest.raises(NotImplementedError):
        assert entity.state_attributes is None
| apache-2.0 |
PokemonGoF/PokemonGo-Bot-Desktop | build/pywin/Lib/traceback.py | 64 | 11285 | """Extract, format and print information about Python stack traces."""
import linecache
import sys
import types
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb', 'tb_lineno']
def _print(file, str='', terminator='\n'):
    # Internal helper: write `str` followed by `terminator` to `file`.
    file.write(str+terminator)
def print_list(extracted_list, file=None):
    """Print the list of tuples as returned by extract_tb() or
    extract_stack() as a formatted stack trace to the given file."""
    if file is None:
        file = sys.stderr
    for filename, lineno, name, line in extracted_list:
        # _print helper inlined: each record is a location line plus,
        # when available, the stripped source line
        file.write('  File "%s", line %d, in %s\n' % (filename, lineno, name))
        if line:
            file.write('    %s\n' % line.strip())
def format_list(extracted_list):
    """Format a list of traceback entry tuples for printing.

    Given a list of tuples as returned by extract_tb() or
    extract_stack(), return a list of strings ready for printing.
    Each string in the resulting list corresponds to the item with the
    same index in the argument list. Each string ends in a newline;
    the strings may contain internal newlines as well, for those items
    whose source text line is not None.
    """
    formatted = []
    for filename, lineno, name, line in extracted_list:
        entry = '  File "%s", line %d, in %s\n' % (filename, lineno, name)
        if line:
            entry += '    %s\n' % line.strip()
        formatted.append(entry)
    return formatted
def print_tb(tb, limit=None, file=None):
    """Print up to 'limit' stack trace entries from the traceback 'tb'.

    If 'limit' is omitted or None, all entries are printed. If 'file'
    is omitted or None, the output goes to sys.stderr; otherwise
    'file' should be an open file or file-like object with a write()
    method.
    """
    if file is None:
        file = sys.stderr
    if limit is None:
        # fall back to the interpreter-wide limit when set
        if hasattr(sys, 'tracebacklimit'):
            limit = sys.tracebacklimit
    n = 0
    while tb is not None and (limit is None or n < limit):
        f = tb.tb_frame
        lineno = tb.tb_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        _print(file,
               '  File "%s", line %d, in %s' % (filename, lineno, name))
        # refresh stale cache entries before reading the source line
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        if line: _print(file, '    ' + line.strip())
        tb = tb.tb_next
        n = n+1
def format_tb(tb, limit=None):
    """A shorthand for 'format_list(extract_tb(tb, limit))'."""
    entries = extract_tb(tb, limit)
    return format_list(entries)
def extract_tb(tb, limit=None):
    """Return list of up to limit pre-processed entries from traceback.

    This is useful for alternate formatting of stack traces.  If
    'limit' is omitted or None, all entries are extracted.  A
    pre-processed stack trace entry is a quadruple (filename, line
    number, function name, text) representing the information that is
    usually printed for a stack trace.  The text is a string with
    leading and trailing whitespace stripped; if the source is not
    available it is None.
    """
    if limit is None:
        # sys.tracebacklimit, when set, caps the number of frames extracted.
        limit = getattr(sys, 'tracebacklimit', None)
    # Renamed the accumulator from 'list' -- the original shadowed the
    # builtin of the same name.
    entries = []
    n = 0
    while tb is not None and (limit is None or n < limit):
        f = tb.tb_frame
        lineno = tb.tb_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        line = line.strip() if line else None
        entries.append((filename, lineno, name, line))
        tb = tb.tb_next
        n += 1
    return entries
def print_exception(etype, value, tb, limit=None, file=None):
    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.

    This differs from print_tb() in the following ways: (1) if
    traceback is not None, it prints a header "Traceback (most recent
    call last):"; (2) it prints the exception type and value after the
    stack trace; (3) if type is SyntaxError and value has the
    appropriate format, it prints the line where the syntax error
    occurred with a caret on the next line indicating the approximate
    position of the error.
    """
    if file is None:
        file = sys.stderr
    if tb:
        _print(file, 'Traceback (most recent call last):')
        print_tb(tb, limit, file)
    # The formatted lines already end in newlines, so suppress the
    # default terminator.
    for line in format_exception_only(etype, value):
        _print(file, line, '')
def format_exception(etype, value, tb, limit=None):
    """Format a stack trace and the exception information.

    The arguments have the same meaning as the corresponding arguments
    to print_exception().  The return value is a list of strings, each
    ending in a newline and some containing internal newlines.  When
    these lines are concatenated and printed, exactly the same text is
    printed as does print_exception().
    """
    # Renamed the accumulator from 'list' -- the original shadowed the
    # builtin of the same name.
    if tb:
        result = ['Traceback (most recent call last):\n']
        result = result + format_tb(tb, limit)
    else:
        result = []
    result = result + format_exception_only(etype, value)
    return result
def format_exception_only(etype, value):
    """Format the exception part of a traceback.

    The arguments are the exception type and value such as given by
    sys.last_type and sys.last_value. The return value is a list of
    strings, each ending in a newline.

    Normally, the list contains a single string; however, for
    SyntaxError exceptions, it contains several lines that (when
    printed) display detailed information about where the syntax
    error occurred.

    The message indicating which exception occurred is always the last
    string in the list.
    """
    # An instance should not have a meaningful value parameter, but
    # sometimes does, particularly for string exceptions, such as
    # >>> raise string1, string2  # deprecated
    #
    # Clear these out first because issubtype(string1, SyntaxError)
    # would raise another exception and mask the original problem.
    #
    # NOTE: types.InstanceType exists only on Python 2 (old-style class
    # instances); this function is Python 2 code.
    if (isinstance(etype, BaseException) or
        isinstance(etype, types.InstanceType) or
        etype is None or type(etype) is str):
        return [_format_final_exc_line(etype, value)]

    stype = etype.__name__

    if not issubclass(etype, SyntaxError):
        return [_format_final_exc_line(stype, value)]

    # It was a syntax error; show exactly where the problem was found.
    lines = []
    try:
        # SyntaxError values carry (msg, (filename, lineno, offset, badline)).
        msg, (filename, lineno, offset, badline) = value.args
    except Exception:
        # Malformed SyntaxError value: fall through and format it plainly.
        pass
    else:
        filename = filename or "<string>"
        lines.append('  File "%s", line %d\n' % (filename, lineno))
        if badline is not None:
            lines.append('    %s\n' % badline.strip())
            if offset is not None:
                # Point a caret at the error column, clamped to the line.
                caretspace = badline.rstrip('\n')
                offset = min(len(caretspace), offset) - 1
                caretspace = caretspace[:offset].lstrip()
                # non-space whitespace (likes tabs) must be kept for alignment
                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
                lines.append('   %s^\n' % ''.join(caretspace))
        value = msg

    lines.append(_format_final_exc_line(stype, value))
    return lines
def _format_final_exc_line(etype, value):
    """Return a single newline-terminated string -- the "ExceptionType: value"
    line -- for the normal case in format_exception_only.

    (The original docstring claimed a list was returned; the function
    returns a plain string.)
    """
    valuestr = _some_str(value)
    if value is None or not valuestr:
        # No useful value: just the exception type name.
        line = "%s\n" % etype
    else:
        line = "%s: %s\n" % (etype, valuestr)
    return line
def _some_str(value):
    """Best-effort conversion of *value* to a string, never raising.

    Tries str() first, then (Python 2) unicode() with non-ASCII characters
    backslash-escaped, and finally falls back to a placeholder naming the
    value's type.
    """
    try:
        return str(value)
    except Exception:
        pass
    try:
        # Python 2 only: 'unicode' does not exist on Python 3, where the
        # resulting NameError is swallowed by the except below.
        value = unicode(value)
        return value.encode("ascii", "backslashreplace")
    except Exception:
        pass
    return '<unprintable %s object>' % type(value).__name__
def print_exc(limit=None, file=None):
    """Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'.
    (In fact, it uses sys.exc_info() to retrieve the same information
    in a thread-safe way.)"""
    if file is None:
        file = sys.stderr
    etype, value, tb = sys.exc_info()
    try:
        print_exception(etype, value, tb, limit, file)
    finally:
        # Break the frame/traceback reference cycle.
        del etype, value, tb
def format_exc(limit=None):
    """Like print_exc() but return a string."""
    etype, value, tb = sys.exc_info()
    try:
        return ''.join(format_exception(etype, value, tb, limit))
    finally:
        # Break the frame/traceback reference cycle.
        del etype, value, tb
def print_last(limit=None, file=None):
    """This is a shorthand for 'print_exception(sys.last_type,
    sys.last_value, sys.last_traceback, limit, file)'."""
    # sys.last_* are only set after an uncaught exception reaches the
    # interactive interpreter's top level.
    if not hasattr(sys, "last_type"):
        raise ValueError("no last exception")
    if file is None:
        file = sys.stderr
    print_exception(sys.last_type, sys.last_value,
                    sys.last_traceback, limit, file)
def print_stack(f=None, limit=None, file=None):
    """Print a stack trace from its invocation point.

    The optional 'f' argument can be used to specify an alternate
    stack frame at which to start. The optional 'limit' and 'file'
    arguments have the same meaning as for print_exception().
    """
    if f is None:
        # Raise and immediately catch a dummy exception so we can pull
        # our caller's frame out of the resulting traceback.
        try:
            raise ZeroDivisionError
        except ZeroDivisionError:
            f = sys.exc_info()[2].tb_frame.f_back
    entries = extract_stack(f, limit)
    print_list(entries, file)
def format_stack(f=None, limit=None):
    """Shorthand for 'format_list(extract_stack(f, limit))'."""
    if f is None:
        # Same dummy-exception trick as print_stack() to find the caller.
        try:
            raise ZeroDivisionError
        except ZeroDivisionError:
            f = sys.exc_info()[2].tb_frame.f_back
    entries = extract_stack(f, limit)
    return format_list(entries)
def extract_stack(f=None, limit=None):
    """Extract the raw traceback from the current stack frame.

    The return value has the same format as for extract_tb().  The
    optional 'f' and 'limit' arguments have the same meaning as for
    print_stack().  Each item in the list is a quadruple (filename,
    line number, function name, text), and the entries are in order
    from oldest to newest stack frame.
    """
    if f is None:
        # Raise and catch a dummy exception to obtain the caller's frame
        # (portable, unlike sys._getframe).
        try:
            raise ZeroDivisionError
        except ZeroDivisionError:
            f = sys.exc_info()[2].tb_frame.f_back
    if limit is None:
        limit = getattr(sys, 'tracebacklimit', None)
    # Renamed the accumulator from 'list' -- the original shadowed the
    # builtin of the same name.
    stack = []
    n = 0
    while f is not None and (limit is None or n < limit):
        lineno = f.f_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        line = line.strip() if line else None
        stack.append((filename, lineno, name, line))
        f = f.f_back
        n += 1
    # Frames were collected newest-first; callers expect oldest-first.
    stack.reverse()
    return stack
def tb_lineno(tb):
    """Calculate correct line number of traceback given in tb.

    Obsolete in 2.3: tb.tb_lineno has been reliable since then, so this
    simply returns it directly.  Kept for backward compatibility.
    """
    return tb.tb_lineno
| mit |
luca76/QGIS | python/plugins/processing/script/ScriptAlgorithm.py | 1 | 11619 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.gui.Help2Html import getHtmlFromHelpFile
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.ParameterString import ParameterString
from processing.parameters.ParameterCrs import ParameterCrs
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterTableField import ParameterTableField
from processing.parameters.ParameterExtent import ParameterExtent
from processing.parameters.ParameterFile import ParameterFile
from processing.parameters.ParameterFactory import ParameterFactory
from processing.outputs.OutputTable import OutputTable
from processing.outputs.OutputVector import OutputVector
from processing.outputs.OutputRaster import OutputRaster
from processing.outputs.OutputNumber import OutputNumber
from processing.outputs.OutputString import OutputString
from processing.outputs.OutputHTML import OutputHTML
from processing.outputs.OutputFile import OutputFile
from processing.outputs.OutputDirectory import OutputDirectory
from processing.outputs.OutputFactory import OutputFactory
from processing.script.WrongScriptException import WrongScriptException
class ScriptAlgorithm(GeoAlgorithm):
def __init__(self, descriptionFile, script=None):
"""The script parameter can be used to directly pass the code
of the script without a file.
This is to be used from the script edition dialog, but should
not be used in other cases.
"""
GeoAlgorithm.__init__(self)
self.script = script
self.descriptionFile = descriptionFile
if script is not None:
self.defineCharacteristicsFromScript()
if descriptionFile is not None:
self.defineCharacteristicsFromFile()
def getCopy(self):
newone = ScriptAlgorithm(self.descriptionFile)
newone.provider = self.provider
return newone
def getIcon(self):
return QtGui.QIcon(os.path.dirname(__file__) + '/../images/script.png')
def defineCharacteristicsFromFile(self):
self.script = ''
self.silentOutputs = []
filename = os.path.basename(self.descriptionFile)
self.name = filename[:filename.rfind('.')].replace('_', ' ')
self.group = 'User scripts'
lines = open(self.descriptionFile)
line = lines.readline()
while line != '':
if line.startswith('##'):
try:
self.processParameterLine(line.strip('\n'))
except:
raise WrongScriptException('Could not load script: '
+ self.descriptionFile + '\n'
+ 'Problem with line: ' + line)
self.script += line
line = lines.readline()
lines.close()
if self.group == '[Test scripts]':
self.showInModeler = False
self.showInToolbox = False
def defineCharacteristicsFromScript(self):
lines = self.script.split('\n')
self.silentOutputs = []
self.name = '[Unnamed algorithm]'
self.group = 'User scripts'
for line in lines:
if line.startswith('##'):
try:
self.processParameterLine(line.strip('\n'))
except:
pass
def createDescriptiveName(self, s):
return s.replace('_', ' ')
def processParameterLine(self, line):
param = None
out = None
line = line.replace('#', '')
# If the line is in the format of the text description files for
# normal algorithms, then process it using parameter and output
# factories
if '|' in line:
self.processDescriptionParameterLine(line)
return
tokens = line.split('=', 1)
desc = self.createDescriptiveName(tokens[0])
if tokens[1].lower().strip() == 'group':
self.group = tokens[0]
return
if tokens[1].lower().strip() == 'name':
self.name = tokens[0]
return
if tokens[1].lower().strip() == 'raster':
param = ParameterRaster(tokens[0], desc, False)
elif tokens[1].lower().strip() == 'vector':
param = ParameterVector(tokens[0], desc,
[ParameterVector.VECTOR_TYPE_ANY])
elif tokens[1].lower().strip() == 'vector point':
param = ParameterVector(tokens[0], desc,
[ParameterVector.VECTOR_TYPE_POINT])
elif tokens[1].lower().strip() == 'vector line':
param = ParameterVector(tokens[0], desc,
[ParameterVector.VECTOR_TYPE_LINE])
elif tokens[1].lower().strip() == 'vector polygon':
param = ParameterVector(tokens[0], desc,
[ParameterVector.VECTOR_TYPE_POLYGON])
elif tokens[1].lower().strip() == 'table':
param = ParameterTable(tokens[0], desc, False)
elif tokens[1].lower().strip() == 'multiple raster':
param = ParameterMultipleInput(tokens[0], desc,
ParameterMultipleInput.TYPE_RASTER)
param.optional = False
elif tokens[1].lower().strip() == 'multiple vector':
param = ParameterMultipleInput(tokens[0], desc,
ParameterMultipleInput.TYPE_VECTOR_ANY)
param.optional = False
elif tokens[1].lower().strip().startswith('selection'):
options = tokens[1].strip()[len('selection '):].split(';')
param = ParameterSelection(tokens[0], desc, options)
elif tokens[1].lower().strip().startswith('boolean'):
default = tokens[1].strip()[len('boolean') + 1:]
param = ParameterBoolean(tokens[0], desc, default)
elif tokens[1].lower().strip() == 'extent':
param = ParameterExtent(tokens[0], desc)
elif tokens[1].lower().strip() == 'file':
param = ParameterFile(tokens[0], desc, False)
elif tokens[1].lower().strip() == 'folder':
param = ParameterFile(tokens[0], desc, True)
elif tokens[1].lower().strip().startswith('number'):
default = tokens[1].strip()[len('number') + 1:]
param = ParameterNumber(tokens[0], desc, default=default)
elif tokens[1].lower().strip().startswith('field'):
field = tokens[1].strip()[len('field') + 1:]
found = False
for p in self.parameters:
if p.name == field:
found = True
break
if found:
param = ParameterTableField(tokens[0], tokens[0], field)
elif tokens[1].lower().strip().startswith('string'):
default = tokens[1].strip()[len('string') + 1:]
param = ParameterString(tokens[0], desc, default)
elif tokens[1].lower().strip().startswith('longstring'):
default = tokens[1].strip()[len('longstring') + 1:]
param = ParameterString(tokens[0], desc, default, multiline = True)
elif tokens[1].lower().strip().startswith('crs'):
default = tokens[1].strip()[len('crs') + 1:]
if not default:
default = 'EPSG:4326'
param = ParameterCrs(tokens[0], desc, default)
elif tokens[1].lower().strip().startswith('output raster'):
out = OutputRaster()
elif tokens[1].lower().strip().startswith('output vector'):
out = OutputVector()
elif tokens[1].lower().strip().startswith('output table'):
out = OutputTable()
elif tokens[1].lower().strip().startswith('output html'):
out = OutputHTML()
elif tokens[1].lower().strip().startswith('output file'):
out = OutputFile()
elif tokens[1].lower().strip().startswith('output directory'):
out = OutputDirectory()
elif tokens[1].lower().strip().startswith('output number'):
out = OutputNumber()
elif tokens[1].lower().strip().startswith('output string'):
out = OutputString()
if param is not None:
self.addParameter(param)
elif out is not None:
out.name = tokens[0]
out.description = tokens[0]
self.addOutput(out)
else:
raise WrongScriptException('Could not load script:'
+ self.descriptionFile or ''
+ '.\n Problem with line "' + line + '"'
)
def processDescriptionParameterLine(self, line):
try:
if line.startswith('Parameter'):
self.addParameter(ParameterFactory.getFromString(line))
elif line.startswith('*Parameter'):
param = ParameterFactory.getFromString(line[1:])
param.isAdvanced = True
self.addParameter(param)
else:
self.addOutput(OutputFactory.getFromString(line))
except Exception:
raise WrongScriptException('Could not load script:'
+ self.descriptionFile or ''
+ '.\n Problem with line "' + line + '"'
)
def processAlgorithm(self, progress):
script = 'import processing\n'
ns = {}
ns['progress'] = progress
for param in self.parameters:
ns[param.name] = param.value
for out in self.outputs:
ns[out.name] = out.value
script += self.script
exec script in ns
for out in self.outputs:
out.setValue(ns[out.name])
def help(self):
if self.descriptionFile is None:
return False, None
helpfile = self.descriptionFile + '.help'
if os.path.exists(helpfile):
return True, getHtmlFromHelpFile(self, helpfile)
else:
return False, None
| gpl-2.0 |
seckcoder/lang-learn | python/sklearn/examples/linear_model/plot_sgd_comparison.py | 7 | 1641 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: Simplified BSD
import numpy as np
import pylab as pl
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
# Fractions of the data held out for testing; the complementary fraction
# (1 - heldout) is used for training.
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
# Number of random train/test splits averaged per held-out fraction.
rounds = 20
digits = datasets.load_digits()

# (label, estimator) pairs to compare.
classifiers = [
    ("SGD", SGDClassifier()),
    ("Perceptron", Perceptron()),
    ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
                                                         C=1.0)),
    ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
                                                          C=1.0)),
]

# X axis: proportion of the data used for training.
xx = 1 - np.array(heldout)
for name, clf in classifiers:
    yy = []
    for i in heldout:
        yy_ = []
        for r in range(rounds):
            # Fresh random split each round; test_size=i holds out fraction i.
            X_train, X_test, y_train, y_test = train_test_split(digits.data,
                                                                digits.target,
                                                                test_size=i)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            # Misclassification rate on the held-out portion.
            yy_.append(1 - np.mean(y_pred == y_test))
        # Average the error over the repeated splits.
        yy.append(np.mean(yy_))
    pl.plot(xx, yy, label=name)

pl.legend(loc="upper right")
pl.xlabel("Proportion train")
pl.ylabel("Test Error Rate")
pl.show()
| unlicense |
jimbobhickville/taskflow | taskflow/utils/schema_utils.py | 4 | 1199 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from jsonschema import exceptions as schema_exc
# Special jsonschema validation types/adjustments: let tuples validate as
# JSON arrays in addition to lists.
_SCHEMA_TYPES = {
    # See: https://github.com/Julian/jsonschema/issues/148
    'array': (list, tuple),
}

# Expose these types so that people don't have to import the same exceptions.
ValidationError = schema_exc.ValidationError
SchemaError = schema_exc.SchemaError
def schema_validate(data, schema):
    """Validates given data using provided json schema.

    :param data: structure (typically a dict or list) to validate
    :param schema: json schema (a dict) to validate ``data`` against
    :raises ValidationError: if ``data`` does not conform to ``schema``
    :raises SchemaError: if ``schema`` itself is invalid
    """
    # _SCHEMA_TYPES lets tuples pass as JSON arrays (see module constant).
    jsonschema.validate(data, schema, types=_SCHEMA_TYPES)
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/framework/startup.py | 17 | 2151 | # startup.py
#
"The main application startup code for PythonWin."
#
# This does the basic command line handling.
# Keep this as short as possible, cos error output is only redirected if
# this runs OK. Errors in imported modules are much better - the messages go somewhere (not any more :-)
import sys
import win32ui
import strop  # Python 2 only: C string-ops module (long since deprecated).

# You may wish to redirect error output somewhere useful if you have startup errors.
# eg, 'import win32traceutil' will do this for you.
# import win32traceutil # Just uncomment this line to see error output!

# An old class I used to use - generally only useful if Pythonwin is running under MSVC
#class DebugOutput:
#  softspace=1
#  def write(self,message):
#    win32ui.OutputDebug(message)
#sys.stderr=sys.stdout=DebugOutput()

# To fix a problem with Pythonwin when started from the Pythonwin directory,
# we update the pywin path to ensure it is absolute.
# If it is indeed relative, it will be relative to our current directory.
# If its already absolute, then this will have no affect.
import pywin, pywin.framework
pywin.__path__[0] = win32ui.FullPath(pywin.__path__[0])
pywin.framework.__path__[0] = win32ui.FullPath(pywin.framework.__path__[0])

# make a few wierd sys values.  This is so later we can clobber sys.argv to trick
# scripts when running under a GUI environment.
moduleName = "intpyapp"
sys.appargvoffset = 0
sys.appargv = sys.argv[:]
# Must check for /app param here.
# NOTE(review): this tests sys.argv[0] for '/app' but reads the module name
# from sys.argv[1] -- presumably the host .EXE passes '/app' as argv[0];
# confirm before changing.
if len(sys.argv)>=2 and strop.lower(sys.argv[0])=='/app':
    import cmdline
    moduleName = cmdline.FixArgFileName(sys.argv[1])
    sys.appargvoffset = 2
    newargv=sys.argv[sys.appargvoffset:]
#  newargv.insert(0, sys.argv[0])
    sys.argv = newargv

# Import the selected application module (Python 2 'exec' statement).
exec "import %s\n" % moduleName

try:
    win32ui.GetApp()._obj_
    # This worked - an app already exists - do nothing more
except (AttributeError, win32ui.error):
    # This means either no app object exists at all, or the one
    # that does exist does not have a Python class (ie, was created
    # by the host .EXE).  In this case, we do the "old style" init...
    import app
    if app.AppBuilder is None:
        raise TypeError, "No application object has been registered"
    app.App = app.AppBuilder()
willingc/oh-mainline | vendor/packages/twisted/twisted/words/im/basesupport.py | 57 | 7890 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Instance Messenger base classes for protocol support.
You will find these useful if you're adding a new protocol to IM.
"""
# Abstract representation of chat "model" classes
from twisted.words.im.locals import ONLINE, OFFLINE, OfflineError
from twisted.words.im import interfaces
from twisted.internet.protocol import Protocol
from twisted.python.reflect import prefixedMethods
from twisted.persisted import styles
from twisted.internet import error
class AbstractGroup:
    """Abstract representation of a chat group (room) on some account."""

    def __init__(self, name, account):
        self.name = name
        self.account = account

    def getGroupCommands(self):
        """Return this group's commands.

        Commands are my methods whose names start with ``imgroup_``;
        they are called with no arguments.
        """
        return prefixedMethods(self, "imgroup_")

    def getTargetCommands(self, target):
        """Return the commands applicable to a user in this group.

        Commands are my methods whose names start with ``imtarget_``;
        they are called with a user present within this room as an
        argument.  Subclasses may override this to filter the commands
        appropriate for the given user.
        """
        return prefixedMethods(self, "imtarget_")

    def join(self):
        client = self.account.client
        if not client:
            raise OfflineError
        client.joinGroup(self.name)

    def leave(self):
        client = self.account.client
        if not client:
            raise OfflineError
        client.leaveGroup(self.name)

    def __repr__(self):
        return '<{0} {1!r}>'.format(self.__class__, self.name)

    def __str__(self):
        return '{0}@{1}'.format(self.name, self.account.accountName)
class AbstractPerson:
    """Abstract representation of a person (contact) on some account."""

    def __init__(self, name, baseAccount):
        self.name = name
        self.account = baseAccount
        self.status = OFFLINE

    def getPersonCommands(self):
        """Return this person's commands.

        Commands are my methods whose names start with ``imperson_``;
        they are called with no arguments.
        """
        return prefixedMethods(self, "imperson_")

    def getIdleTime(self):
        """Return the person's idle time as a display string."""
        return '--'

    def __repr__(self):
        return '<{0} {1!r}/{2}>'.format(self.__class__, self.name, self.status)

    def __str__(self):
        return '{0}@{1}'.format(self.name, self.account.accountName)
class AbstractClientMixin:
    """Designed to be mixed in to a Protocol implementing class.

    Inherit from me first.

    @ivar _logonDeferred: Fired when I am done logging in.
    """
    def __init__(self, account, chatui, logonDeferred):
        # Find which base class is the actual twisted Protocol, so the
        # methods below can delegate to it explicitly (this mixin is meant
        # to precede the Protocol subclass in the bases list).
        for base in self.__class__.__bases__:
            if issubclass(base, Protocol):
                self.__class__._protoBase = base
                break
        else:
            # NOTE(review): no Protocol base found -- _protoBase is left
            # unset and the delegating methods below would fail with
            # AttributeError; confirm this case is intentionally ignored.
            pass
        self.account = account
        self.chat = chatui
        self._logonDeferred = logonDeferred

    def connectionMade(self):
        # Delegate to the concrete Protocol base identified in __init__.
        self._protoBase.connectionMade(self)

    def connectionLost(self, reason):
        # Notify the account and chat UI first, then let the real
        # Protocol implementation run its own teardown.
        self.account._clientLost(self, reason)
        self.unregisterAsAccountClient()
        return self._protoBase.connectionLost(self, reason)

    def unregisterAsAccountClient(self):
        """Tell the chat UI that I have `signed off'.
        """
        self.chat.unregisterAccountClient(self)
class AbstractAccount(styles.Versioned):
    """Base class for Accounts.

    I am the start of an implementation of L{IAccount<interfaces.IAccount>}, I
    implement L{isOnline} and most of L{logOn}, though you'll need to implement
    L{_startLogOn} in a subclass.

    @cvar _groupFactory: A Callable that will return a L{IGroup} appropriate
        for this account type.
    @cvar _personFactory: A Callable that will return a L{IPerson} appropriate
        for this account type.

    @type _isConnecting: boolean
    @ivar _isConnecting: Whether I am in the process of establishing a
        connection to the server.
    @type _isOnline: boolean
    @ivar _isOnline: Whether I am currently on-line with the server.

    @ivar accountName:
    @ivar autoLogin:
    @ivar username:
    @ivar password:
    @ivar host:
    @ivar port:
    """

    _isOnline = 0
    _isConnecting = 0
    client = None

    _groupFactory = AbstractGroup
    _personFactory = AbstractPerson

    # NOTE(review): twisted.persisted.styles.Versioned keys its upgrade
    # machinery on an attribute spelled 'persistenceVersion'; this spelling
    # looks like a typo that would leave the version at its default --
    # confirm against the Versioned implementation before renaming, since
    # the name affects how previously pickled accounts are upgraded.
    persistanceVersion = 2

    def __init__(self, accountName, autoLogin, username, password, host, port):
        self.accountName = accountName
        self.autoLogin = autoLogin
        self.username = username
        self.password = password
        self.host = host
        self.port = port

        # Caches of IGroup / IPerson objects, keyed by name, so repeated
        # lookups return the same object (see getGroup / getPerson).
        self._groups = {}
        self._persons = {}

    # NOTE(review): 'upgrateToVersion2' -- Versioned looks for methods named
    # 'upgradeToVersionN', so this misspelled hook may never be invoked;
    # confirm before fixing (old pickles may depend on current behaviour).
    def upgrateToVersion2(self):
        # Added in CVS revision 1.16.
        for k in ('_groups', '_persons'):
            if not hasattr(self, k):
                setattr(self, k, {})

    def __getstate__(self):
        # Strip transient connection state so it is never pickled.
        state = styles.Versioned.__getstate__(self)
        for k in ('client', '_isOnline', '_isConnecting'):
            try:
                del state[k]
            except KeyError:
                pass
        return state

    def isOnline(self):
        return self._isOnline

    def logOn(self, chatui):
        """Log on to this account.

        Takes care to not start a connection if a connection is
        already in progress.  You will need to implement
        L{_startLogOn} for this to work, and it would be a good idea
        to override L{_loginFailed} too.

        @returntype: Deferred L{interfaces.IClient}
        """
        if (not self._isConnecting) and (not self._isOnline):
            self._isConnecting = 1
            d = self._startLogOn(chatui)
            d.addCallback(self._cb_logOn)
            # if chatui is not None:
            # (I don't particularly like having to pass chatUI to this function,
            # but we haven't factored it out yet.)
            d.addCallback(chatui.registerAccountClient)
            d.addErrback(self._loginFailed)
            return d
        else:
            raise error.ConnectError("Connection in progress")

    def getGroup(self, name):
        """Group factory.

        Returns a cached L{IGroup} for *name*, creating it on first use.

        @param name: Name of the group on this account.
        @type name: string
        """
        group = self._groups.get(name)
        if group is None:
            group = self._groupFactory(name, self)
            self._groups[name] = group
        return group

    def getPerson(self, name):
        """Person factory.

        Returns a cached L{IPerson} for *name*, creating it on first use.

        @param name: Name of the person on this account.
        @type name: string
        """
        person = self._persons.get(name)
        if person is None:
            person = self._personFactory(name, self)
            self._persons[name] = person
        return person

    def _startLogOn(self, chatui):
        """Start the sign on process.

        Factored out of L{logOn}.

        @returntype: Deferred L{interfaces.IClient}
        """
        raise NotImplementedError()

    def _cb_logOn(self, client):
        # Success path: record the connected client and flip state flags.
        self._isConnecting = 0
        self._isOnline = 1
        self.client = client
        return client

    def _loginFailed(self, reason):
        """Errorback for L{logOn}.

        @type reason: Failure

        @returns: I{reason}, for further processing in the callback chain.
        @returntype: Failure
        """
        self._isConnecting = 0
        self._isOnline = 0  # just in case
        return reason

    def _clientLost(self, client, reason):
        # Connection gone: drop the client reference and reset state flags.
        self.client = None
        self._isConnecting = 0
        self._isOnline = 0
        return reason

    def __repr__(self):
        return "<%s: %s (%s@%s:%s)>" % (self.__class__,
                                        self.accountName,
                                        self.username,
                                        self.host,
                                        self.port)
AndrewSallans/osf.io | scripts/migrate_guid.py | 64 | 8584 | """
Create a GUID for all non-GUID database records. If record already has a GUID,
skip; if record has an ID but not a GUID, create a GUID matching the ID. Newly
created records will have optimistically generated GUIDs.
"""
import time
import collections
from framework.mongo import StoredObject
from website import models
from website.app import init_app
# Module-level side effect: initialize the OSF app (DB backends + routes)
# so the model classes used below are wired up when this script runs.
app = init_app('website.settings', set_backends=True, routes=True)
def count_values(values):
    """Return a mapping from each distinct value to its occurrence count.

    Uses collections.Counter instead of the original hand-rolled
    defaultdict loop.  Like the defaultdict it replaces, missing keys
    read as 0 (Counter returns 0 for absent keys without inserting).
    """
    return collections.Counter(values)
def check_conflicts(conflict_models):
    """Find primary-key collisions across the given models.

    Returns (case_conflicts, no_case_conflicts): ids duplicated exactly
    as stored, and ids duplicated once lower-cased (falsy ids are dropped
    from the case-insensitive pass).
    """
    ids = []
    for model in conflict_models:
        ids.extend(model.find().__iter__(raw=True))
    if len(set(ids)) != len(ids):
        names = ', '.join([model._name for model in conflict_models])
        print('Conflict among models {}'.format(names))
    counts = count_values(ids)
    case_conflicts = [_id for _id in counts if counts[_id] > 1]
    ids = [_id.lower() for _id in ids if _id]
    counts = count_values(ids)
    no_case_conflicts = [_id for _id in counts if counts[_id] > 1]
    return case_conflicts, no_case_conflicts
# Models whose records get GUID-backed primary keys during migration;
# their pks must be unique (case-insensitively) across all of them.
guid_models = [models.Node, models.User, models.NodeFile,
               models.NodeWikiPage, models.MetaData]
def migrate_guid(conflict_models):
    """Check GUID models for conflicts, then migrate records that are not in
    conflict. Lower-case primary keys; ensure GUIDs for each record; delete
    outdated GUIDs.

    (Python 2 script: uses print statements.)
    """
    case_conflicts, no_case_conflicts = check_conflicts(conflict_models)
    print 'Case conflicts', case_conflicts
    print 'No-case conflicts', no_case_conflicts
    # Exact-case duplicates cannot be resolved automatically; bail out.
    if case_conflicts:
        raise Exception('Unavoidable conflicts')
    for model in conflict_models:
        print 'Working on model', model._name
        for obj in model.find():
            if obj is None:
                continue
            # Check for existing GUID
            guid = models.Guid.load(obj._primary_key)
            print obj._primary_key
            if guid is not None:
                # Skip if GUID is already lower-cased
                if guid._primary_key == guid._primary_key.lower():
                    continue
                # Skip if GUID in no-case conflicts
                if guid._primary_key.lower() in no_case_conflicts:
                    continue
                # Delete GUID record
                guid.remove_one(guid)
            # Lower-case if not in no-case conflicts
            if obj._primary_key.lower() not in no_case_conflicts:
                obj._primary_key = obj._primary_key.lower()
                obj.save()
            # Update GUID
            obj._ensure_guid()
            # Verify backrefs/foreign fields still point at this object.
            check_pk_change(obj)
def check_pk_change(obj):
    """Sanity-check *obj* after a primary-key change.

    Prints an error line for every back-reference or foreign field whose
    linkage no longer round-trips to *obj*.  (Python 2 script: uses print
    statements.)
    """
    # Pass 1: every backref record should still reference obj.
    for backref in obj._backrefs_flat:
        pk = backref[1]
        # GUID backrefs are expected to change; skip them.
        if backref[0][1] == 'guid':
            continue
        Schema = StoredObject.get_collection(backref[0][1])
        record = Schema.load(pk)
        if record is None:
            print 'Error: Backref {} not found'.format(pk)
        # NOTE(review): when record is None the code still falls through to
        # getattr(record, ...) below, which would raise AttributeError --
        # confirm whether a 'continue' was intended here.
        field = getattr(record, backref[0][2])
        if isinstance(field, list):
            if obj not in field:
                print 'Error: Object {} not in backref list'.format(pk)
        else:
            if field != obj:
                print 'Error: Object {} not equal to backref'.format(pk)
    # Pass 2: every referent of obj's foreign fields should hold a backref
    # to obj under the '<name>__<key>__<field>' attribute.
    for fname, fobj in obj._fields.items():
        if fobj._is_foreign:
            if fobj._list:
                key = fobj._field_instance._backref_field_name
            else:
                key = fobj._backref_field_name
            if not key:
                continue
            backref_key = '__'.join([
                obj._name,
                key,
                fname,
            ])
            value = getattr(obj, fname)
            if not value:
                continue
            if fobj._list:
                for item in value:
                    if item is None:
                        continue
                    if obj not in getattr(item, backref_key):
                        print 'Error: Obj {} not in backrefs of referent {}'.format(
                            obj._primary_key, fname
                        )
            else:
                if obj not in getattr(value, backref_key):
                    print 'Error: Obj {} not in backrefs of referent {}'.format(
                        obj._primary_key, fname
                    )
def migrate_guid_log(log):
    """Migrate non-reference fields containing primary keys on logs.

    Rewrites pks stored loosely in ``log.params`` (project/node ids and
    contributor ids) to their lower-cased, GUID-backed equivalents, then
    saves the log.  (Python 2: uses ``basestring``.)
    """
    for key in ['project', 'node']:
        if key in log.params:
            value = log.params[key] or ''
            record = models.Node.load(value.lower())
            if record is not None:
                log.params[key] = record._primary_key
    if 'contributor' in log.params:
        # Only raw string pks are migrated; other shapes are left alone.
        if isinstance(log.params['contributor'], basestring):
            record = models.User.load(log.params['contributor'].lower())
            if record:
                log.params['contributor'] = record._primary_key
    if 'contributors' in log.params:
        for idx, uid in enumerate(log.params['contributors']):
            if isinstance(uid, basestring):
                record = models.User.load(uid.lower())
                if record:
                    log.params['contributors'][idx] = record._primary_key
    # Shouldn't have to do this, but some logs users weren't correctly
    # migrated; may have to do with inconsistent backrefs
    data = log.to_storage()
    if data['user']:
        record = models.User.load(data['user'].lower())
        if record:
            log.user = record
    log.save()
def migrate_guid_node(node):
    """Migrate non-reference fields containing primary keys on nodes.

    Rewrites every raw primary key held in the node's list/dict fields
    (contributors, forks, registrations, wiki pages, files) to the
    migrated lower-case GUID, then saves the node once at the end.
    """
    # Contributor dicts carry a raw user id under 'id'.
    for idx, contributor in enumerate(node.contributor_list):
        if 'id' in contributor:
            record = models.User.load(contributor['id'].lower())
            if record:
                node.contributor_list[idx]['id'] = record._primary_key
    # Fork / registration backref lists may contain bare id strings.
    for idx, fork in enumerate(node.node__forked):
        if isinstance(fork, basestring):
            record = models.Node.load(fork.lower())
            if record:
                node.node__forked[idx] = record._primary_key
    for idx, registration in enumerate(node.node__registrations):
        if isinstance(registration, basestring):
            record = models.Node.load(registration.lower())
            if record:
                node.node__registrations[idx] = record._primary_key
    # Wiki maps: page name -> current version id, and -> list of version ids.
    for page in node.wiki_pages_current:
        record = models.NodeWikiPage.load(str(node.wiki_pages_current[page]).lower())
        if record:
            node.wiki_pages_current[page] = record._primary_key
    for page in node.wiki_pages_versions:
        for idx, wid in enumerate(node.wiki_pages_versions[page]):
            record = models.NodeWikiPage.load(str(wid).lower())
            if record:
                node.wiki_pages_versions[page][idx] = record._primary_key
    # File maps mirror the wiki maps: filename -> id(s).
    for fname in node.files_current:
        record = models.NodeFile.load(str(node.files_current[fname]).lower())
        if record:
            node.files_current[fname] = record._primary_key
    for fname in node.files_versions:
        for idx, fid in enumerate(node.files_versions[fname]):
            record = models.NodeFile.load(str(fid).lower())
            if record:
                node.files_versions[fname][idx] = record._primary_key
    node.save()
def migrate_guid_wiki(wiki):
    """Migrate non-reference fields containing primary keys on wiki pages.

    Re-points the raw ``user`` and ``node`` ids stored on the wiki page at
    their migrated lower-case GUID records, then saves the page.
    """
    # Work from raw storage: the fields may hold stale string ids.
    data = wiki.to_storage()
    uid = data.get('user')
    if uid:
        record = models.User.load(uid.lower())
        if record:
            wiki.user = record
    pid = data.get('node')
    if pid:
        record = models.Node.load(pid.lower())
        if record:
            wiki.node = record
    wiki.save()
# Python 2 entry point: runs the generic GUID migration, then the manual
# per-model fix-ups above, reporting total wall-clock time.
if __name__ == '__main__':
    t0 = time.time()
    # Lower-case PKs and ensure GUIDs
    migrate_guid(guid_models)
    # Manual migrations
    for node in models.Node.find():
        #print 'Migrating node', node._primary_key
        migrate_guid_node(node)
    for log in models.NodeLog.find():
        #print 'Migrating log', log._primary_key
        migrate_guid_log(log)
    for wiki in models.NodeWikiPage.find():
        #print 'Migrating wiki', wiki._primary_key
        migrate_guid_wiki(wiki)
    print 'Took {}'.format(time.time() - t0)
| apache-2.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/encodings/cp500.py | 266 | 13121 | """ Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP500 (EBCDIC) codec backed by the module-level charmap
    tables generated by gencodec.py; do not hand-edit the tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap_encode returns (output, length consumed); [0] keeps the output.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap_decode returns (output, length consumed); [0] keeps the output.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # All behavior is inherited: Codec supplies encode/decode,
    # codecs.StreamWriter supplies the stream plumbing.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # All behavior is inherited: Codec supplies encode/decode,
    # codecs.StreamReader supplies the stream plumbing.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record registered under the name 'cp500'."""
    return codecs.CodecInfo(
        name='cp500',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
drowsy810301/NTHUOJ_web | vjudge/submit.py | 1 | 2981 | """
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import requests, base64, json
from problem.models import Submission, Problem
from vjudge.models import VjudgeID
from vjudge.status import status_url
from utils.config_info import get_config
from utils.log_info import get_logger
logger = get_logger()
vjudge_username = get_config('vjudge', 'username')
vjudge_password = get_config('vjudge', 'password')
login_url = 'http://acm.hust.edu.cn/vjudge/user/login.action'
submit_url = 'http://acm.hust.edu.cn/vjudge/problem/submit.action'
LANGUAGE_CHOICE = {
Problem.UVA_JUDGE: {Problem.C: 1 ,Problem.CPP: 3, Problem.CPP11: 5},
Problem.ICPC_JUDGE: {Problem.C: 1 ,Problem.CPP: 3, Problem.CPP11: 5},
Problem.POJ_JUDGE: {Problem.C: 1 ,Problem.CPP: 0, Problem.CPP11: 0}
}
def submit_to_vjudge(code, submission):
    """Submit ``code`` for ``submission`` to the HUST virtual judge.

    Maps the problem to its vjudge id, logs in, posts the base64-encoded
    source, then reads the newest entry on the public status page to record
    the vjudge submission id and mark the submission JUDGING.  On any
    failure the submission is marked JUDGE_ERROR with a logged traceback.
    The submission is always saved.
    """
    try:
        # Map our (judge_source, judge_source_id) pair onto the vjudge id.
        problem = submission.problem
        vjudge_id = VjudgeID.objects.get(
            judge_source=problem.judge_type.replace('OTHER_', ''),
            judge_source_id=problem.other_judge_id).vjudge_id
        language = LANGUAGE_CHOICE[problem.judge_type][submission.language]
        session = requests.Session()
        session.post(login_url,
                     data={'username': vjudge_username, 'password': vjudge_password},
                     timeout=3)
        session.post(submit_url,
                     data={'language': language, 'isOpen': 0,
                           'source': base64.b64encode(code), 'id': vjudge_id},
                     timeout=3)
        # The newest row on the status page is assumed to be our submission.
        raw_status = requests.get(status_url, timeout=5).text.encode("utf-8")
        raw_status = json.loads(raw_status)['data']
        sid = raw_status[0][0]
        submission.other_judge_sid = sid
        submission.status = Submission.JUDGING
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; log the traceback instead of discarding it.
        logger.exception('Submission %d fails to submit to vjudge' % submission.id)
        submission.status = Submission.JUDGE_ERROR
    finally:
        submission.save()
| mit |
Ghatage/peloton | third_party/logcabin/scripts/electionperf.py | 8 | 4444 | #!/usr/bin/env python
# Copyright (c) 2012 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This runs a LogCabin cluster and continually kills off the leader, timing how
long each leader election takes.
"""
from __future__ import print_function
from common import sh, captureSh, Sandbox, hosts
import re
import subprocess
import sys
import time
num_servers = 5
def same(seq):
    """Return True if every element of ``seq`` equals the first.

    An empty sequence is vacuously True (matching the original loop).
    """
    # all() short-circuits on the first mismatch, like the manual loop did.
    return all(x == seq[0] for x in seq)
def await_stable_leader(sandbox, server_ids, after_term=0):
    """Poll the servers' debug logs until all agree on a leader.

    Scans ``debug/<id>`` for each server, collecting who it believes is
    leader and in which term.  Returns a dict with the agreed 'leader' id,
    its 'term' (strictly greater than ``after_term``), and 'num_woken' --
    how many servers started an election after ``after_term``.  Blocks,
    re-checking every 250 ms, and propagates sandbox failures.
    """
    while True:
        server_beliefs = {}
        for server_id in server_ids:
            server_beliefs[server_id] = {'leader': None,
                                         'term': None,
                                         'wake': None}
            b = server_beliefs[server_id]
            for line in open('debug/%d' % server_id):
                # Raw strings: '\d' in a plain literal is an invalid escape
                # sequence (DeprecationWarning since Python 3.6).
                m = re.search(r'All hail leader (\d+) for term (\d+)', line)
                if m is not None:
                    b['leader'] = int(m.group(1))
                    b['term'] = int(m.group(2))
                    continue
                m = re.search(r'Now leader for term (\d+)', line)
                if m is not None:
                    b['leader'] = server_id
                    b['term'] = int(m.group(1))
                    continue
                m = re.search(r'Running for election in term (\d+)', line)
                if m is not None:
                    b['wake'] = int(m.group(1))
        terms = [b['term'] for b in server_beliefs.values()]
        leaders = [b['leader'] for b in server_beliefs.values()]
        if same(terms) and terms[0] > after_term:
            # Same term on every server implies they agree on the leader.
            assert same(leaders), server_beliefs
            return {'leader': leaders[0],
                    'term': terms[0],
                    'num_woken': sum([1 for b in server_beliefs.values() if b['wake'] > after_term])}
        else:
            time.sleep(.25)
            sandbox.checkFailures()
# Main driver: repeatedly kill the current leader and measure how many terms
# (and how many awakened candidates) each re-election costs.
with Sandbox() as sandbox:
    sh('rm -f debug/*')
    sh('mkdir -p debug')
    # NOTE(review): server_ids is mutated with remove/append below, so this
    # relies on Python 2 range() returning a list -- confirm before porting.
    server_ids = range(1, num_servers + 1)
    servers = {}

    def start(server_id):
        # Launch one LogCabin replica on its host, capturing stderr in debug/<id>.
        host = hosts[server_id - 1]
        command = 'build/LogCabin -i %d' % server_id
        print('Starting LogCabin -i %d on %s' % (server_id, host[0]))
        server = sandbox.rsh(host[0], command, bg=True,
                             stderr=open('debug/%d' % server_id, 'w'))
        servers[server_id] = server

    for server_id in server_ids:
        start(server_id)

    num_terms = []
    num_woken = []
    for i in range(100):
        # Wait for a stable leader, kill it, then wait for its successor.
        old = await_stable_leader(sandbox, server_ids)
        print('Server %d is the leader in term %d' % (old['leader'], old['term']))
        print('Killing server %d' % old['leader'])
        sandbox.kill(servers[old['leader']])
        servers.pop(old['leader'])
        server_ids.remove(old['leader'])
        new = await_stable_leader(sandbox, server_ids, after_term=old['term'])
        print('Server %d is the leader in term %d' % (new['leader'], new['term']))
        sandbox.checkFailures()
        num_terms.append(new['term'] - old['term'])
        print('Took %d terms to elect a new leader' % (new['term'] - old['term']))
        num_woken.append(new['num_woken'])
        print('%d servers woke up' % (new['num_woken']))
        # Restart the killed server so the cluster is full for the next round.
        server_ids.append(old['leader'])
        start(old['leader'])

    # Report sorted distributions on stderr.
    num_terms.sort()
    print('Num terms:',
          file=sys.stderr)
    print('\n'.join(['%d: %d' % (i + 1, term) for (i, term) in enumerate(num_terms)]),
          file=sys.stderr)
    num_woken.sort()
    print('Num woken:',
          file=sys.stderr)
    print('\n'.join(['%d: %d' % (i + 1, n) for (i, n) in enumerate(num_woken)]),
          file=sys.stderr)
| apache-2.0 |
vitordouzi/sigtrec_eval | sigtrec_eval.py | 1 | 6354 | import sys, os, subprocess, math, multiprocessing, random
import numpy as np
from numpy import nan
import pandas as pd
from scipy.stats.mstats import ttest_rel
from scipy.stats import ttest_ind, wilcoxon
from imblearn.over_sampling import RandomOverSampler, SMOTE
from collections import namedtuple
# One evaluation request: a qrel file, its dataset id, the result files to
# compare against it, and a display name for the approach.
Result = namedtuple('Result', ['qrelFileName', 'datasetid', 'resultsFiles', 'nameApproach'])


def getFileName(qrelFile):
    """Return the final path component of *qrelFile*."""
    _, tail = os.path.split(qrelFile)
    return tail
class SIGTREC_Eval():
    """Drive the trec_eval binary over ranked result files and build
    per-dataset tables of fold-averaged measures annotated with
    statistical-significance markers against a baseline approach."""

    def __init__(self, cv=0, seed=42, round_=4, trec_eval=os.path.join(".", os.path.dirname(__file__), "trec_eval")):
        # cv: number of cross-validation folds (0 disables folding).
        # round_: decimal places used when printing means.
        # trec_eval: path to the trec_eval executable invoked via subprocess.
        self.nameApp = {}
        self.cv = cv
        self.seed = seed
        self.trec_eval = trec_eval
        self.round = round_
        # NOTE(review): seeds the *global* random module, affecting other users.
        random.seed(seed)

    def _build_F1(self, qrelFileName, to_compare, m, top):
        """Compute per-query F1@top lines by combining trec_eval's P.top and
        recall.top outputs (trec_eval has no native F1 measure)."""
        # '%s.%d' is filled in below with the measure name and cutoff.
        command = ' '.join([self.trec_eval, qrelFileName, to_compare, '-q ', '-M %d' % top, '-m %s.%d'])
        # Output is the repr of a bytes object, hence the [2:-1] strip and
        # the escaped '\\n' / '\\t' separators used throughout.
        content_P = str(subprocess.Popen(command % ('P', top), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0])[2:-1].split('\\n')
        content_R = str(subprocess.Popen(command % ('recall', top), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0])[2:-1].split('\\n')
        content_F1 = []
        for i in range(len(content_P)):
            part_P = content_P[i].split('\\t')
            part_R = content_R[i].split('\\t')
            if len(part_P) != len(part_R) or len(part_P) < 3:
                continue
            if part_P[1] != part_R[1]:
                # Precision and recall rows out of sync: report and skip.
                print(part_P[1], part_R[1])
            else:
                Pre = float(part_P[2])
                Rec = float(part_R[2])
                if Pre == 0. or Rec == 0.:
                    content_F1.append( 'F1_%d\\t%s\\t0.' % ( top, part_P[1] ) )
                else:
                    # Harmonic mean of precision and recall.
                    line = 'F1_%d\\t%s\\t%.4f' % ( top, part_P[1], (2.*Pre*Rec)/(Pre+Rec) )
                    content_F1.append( line )
        return content_F1

    def build_df(self, results, measures, top):
        """Run every (result file, measure) pair through trec_eval and pivot
        the per-query scores into a DataFrame indexed by (qrel, docid) with
        one column per (approach index, measure), plus a 'fold' column."""
        raw = []
        # Total number of trec_eval invocations, for the progress display.
        qtd = len(measures)*sum([ len(input_result.resultsFiles) for input_result in results])
        i=0
        for input_result in results:
            self.nameApp[input_result.datasetid] = []
            for m in measures:
                for (idx, to_compare) in enumerate(input_result.resultsFiles):
                    self.nameApp[input_result.datasetid].append(getFileName(to_compare))
                    print("\r%.2f%%" % (100.*i/qtd),end='')
                    i+=1
                    if m.startswith("F1"):
                        content = self._build_F1(input_result.qrelFileName, to_compare, m, top=top)
                    else:
                        # (was: "Tamanho 10 FIXADO" -- ranking size hard-coded via -M top)
                        command = ' '.join([self.trec_eval, input_result.qrelFileName, to_compare, '-q -M %d' % top, '-m', m])
                        content = str(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0])[2:-1].split('\\n')
                    # [:-1] drops the trailing empty line from the split.
                    raw.extend([ (input_result.datasetid, idx, getFileName(to_compare), *( w.strip() for w in line.split('\\t') )) for line in content ][:-1])
        # NOTE(review): '%%' with no format args prints a literal double
        # percent sign -- probably meant "\r100.00%".
        print("\r100.00%%")
        # Drop trec_eval's aggregate 'all' rows; keep per-query rows only.
        df_raw = pd.DataFrame(list(filter(lambda x: not x[4]=='all', raw)), columns=['qrel', 'idx_approach', 'approach', 'measure', 'docid', 'result'])
        df_finale = pd.pivot_table(df_raw, index=['qrel', 'docid'], columns=['idx_approach','measure'], values='result', aggfunc='first')
        df_finale.reset_index()
        df_finale[np.array(df_finale.columns)] = df_finale[np.array(df_finale.columns)].astype(np.float64)
        df_finale.replace('None', 0.0, inplace=True)
        df_finale.replace(nan, 0.0, inplace=True)
        #df_finale = df_finale[~df_finale['docid'].isin(['all'])]
        df_finale['fold'] = [0]*len(df_finale)
        if self.cv > 0:
            # Assign queries to folds round-robin, then shuffle per qrel.
            for (qrel, qrel_group) in df_finale.groupby('qrel'):
                folds=(list(range(self.cv))*math.ceil(len(qrel_group)/self.cv))[:len(qrel_group)]
                random.shuffle(folds)
                df_finale.loc[qrel, 'fold'] = folds
        #with pd.option_context('display.max_rows', None, 'display.max_columns', 10000000000):
        #    print(df_finale)
        return df_finale

    def get_test(self, test, pbase, pcomp, multi_test=False):
        """Run the named significance test ('student', 'wilcoxon' or
        'welcht') on baseline vs. comparison scores and return a marker:
        filled/hollow triangles for p<0.01 / p<0.05 (down = worse than
        baseline, up = better), a dot or blanks otherwise."""
        if np.array_equal(pbase.values, pcomp.values):
            # Identical samples: the tests are undefined, treat as no difference.
            pvalue = 1.
        else:
            if test == 'student':
                (tvalue, pvalue) = ttest_rel(pbase, pcomp)
            elif test == 'wilcoxon':
                (tvalue, pvalue) = wilcoxon(pbase, pcomp)
            elif test == 'welcht':
                (tvalue, pvalue) = ttest_ind(pbase, pcomp, equal_var=False)
        if pvalue < 0.05:
            pbase_mean = pbase.mean()
            pcomp_mean = pcomp.mean()
            if pvalue < 0.01:
                if pbase_mean > pcomp_mean:
                    result_test = '▼ '
                else:
                    result_test = '▲ '
            else:
                if pbase_mean > pcomp_mean:
                    result_test = 'ᐁ '
                else:
                    result_test = 'ᐃ '
        else:
            if not multi_test:
                result_test = '   '
            else:
                result_test = '⏺ '
        return result_test

    def build_printable(self, table, significance_tests):
        """For each qrel, build a DataFrame of fold-averaged means per
        approach, each annotated with the significance markers of every
        requested test against approach 0 (the baseline, marked 'bl')."""
        printable = {}
        for qrel, qrel_group in table.groupby('qrel'):
            raw = []
            # Approach 0 is the baseline every other approach is tested against.
            base = qrel_group.loc[:,0]
            for idx_app in [idx for idx in qrel_group.columns.levels[0] if type(idx) == int]:
                instance = [ self.nameApp[qrel][idx_app] ]
                for m in qrel_group[idx_app].columns:
                    array_results = qrel_group[idx_app][m]
                    #print(qrel_group.groupby('fold').mean()[idx_app][m])
                    # Mean over folds of the per-fold mean.
                    mean_measure_folds = qrel_group.groupby('fold').mean()[idx_app][m].mean()
                    test_result=""
                    for test in significance_tests:
                        if idx_app > 0:
                            test_result+=(self.get_test(test, base[m], array_results, len(significance_tests)>1))
                        else:
                            test_result+=('bl ')
                    instance.append('%f %s' % (round(mean_measure_folds,self.round), test_result) )
                raw.append(instance)
            printable[qrel] = pd.DataFrame(raw, columns=['app', *(table.columns.levels[1].get_values())[:-1]])
        return printable

    def get_sampler(self, sampler_name):
        """Map a sampler name/alias to a seeded imblearn over-sampler.

        Returns None (implicitly) for unrecognized names.
        """
        if sampler_name == "ros" or sampler_name == 'RandomOverSampler':
            return RandomOverSampler(random_state=self.seed)
        if sampler_name == "SMOTE" or sampler_name == "smote":
            return SMOTE(random_state=self.seed)

    def build_over_sample(self, df, sampler):
        """Over-sample each fold so qrels are balanced; the resampled rows
        are returned as a new DataFrame whose 'qrel' is the sampler's class
        name (registered in nameApp so it prints like a dataset)."""
        raw = []
        for fold, fold_group in df.groupby('fold'):
            # Class labels for the sampler: integer-coded qrel of each row.
            y = pd.factorize(fold_group.index.get_level_values('qrel'))[0]
            X_sampled, y_res = sampler.fit_sample(fold_group, y)
            raw.extend(X_sampled)
        df_sampled = pd.DataFrame(raw, columns=df.columns)
        df_sampled['qrel'] = [sampler.__class__.__name__]*len(df_sampled)
        # Reuse the approach names of the first registered dataset.
        self.nameApp[sampler.__class__.__name__] = self.nameApp[list(self.nameApp.keys())[0]]
        return df_sampled
| mit |
zerothi/sisl | sisl/quaternion.py | 1 | 6040 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import math as m
import numpy as np
from ._internal import set_module
__all__ = ['Quaternion']
@set_module("sisl")
class Quaternion:
    """
    Quaternion object to enable easy rotational quantities.

    Stored internally as ``[cos(a/2), sin(a/2) * v]`` where ``a`` is the
    rotation angle and ``v`` the (unnormalized) rotation axis.
    """

    def __init__(self, angle=0., v=None, rad=False):
        """ Create quaternion object with angle and vector

        Parameters
        ----------
        angle : float
            rotation angle, in degrees unless ``rad`` is True
        v : array_like of length 3, optional
            rotation axis; defaults to the x-axis
        rad : bool
            if True, ``angle`` is given in radians
        """
        if rad:
            half = angle / 2
        else:
            # degrees -> radians, already halved: a/360*pi == (a*pi/180)/2
            half = angle / 360 * m.pi
        self._v = np.empty([4], np.float64)
        self._v[0] = m.cos(half)
        if v is None:
            v = np.array([1, 0, 0], np.float64)
        self._v[1:] = np.array(v[:3], np.float64) * m.sin(half)

    def copy(self):
        """ Return deepcopy of itself """
        q = Quaternion()
        q._v = np.copy(self._v)
        return q

    def conj(self):
        """ Returns the conjugate of it-self (vector part negated) """
        q = self.copy()
        q._v[1:] *= -1
        return q

    def norm(self):
        """ Returns the norm of this quaternion """
        return np.sqrt(np.sum(self._v**2))

    @property
    def degree(self):
        """ Returns the angle associated with this quaternion (in degree)"""
        # 2*acos(w) in degrees: acos * 2 * 180/pi == acos * 360/pi
        return m.acos(self._v[0]) * 360. / m.pi

    @property
    def radian(self):
        """ Returns the angle associated with this quaternion (in radians)"""
        return m.acos(self._v[0]) * 2.

    angle = radian

    def rotate(self, v):
        """ Rotates 3-dimensional vector(s) ``v`` with the associated quaternion

        ``v`` may be a single 3-vector or an N-D array whose last axis has
        length 3; the returned array has the same shape as ``v``.
        """
        if len(v.shape) == 1:
            # Single vector: embed as a quaternion with w=1 and conjugate.
            q = self.copy()
            q._v[0] = 1.
            q._v[1:] = v[:]
            q = self * q * self.conj()
            return q._v[1:]

        # Matrix of vectors: compute q * v * q^* component-wise in bulk.
        v1 = np.copy(self._v)
        v2 = np.copy(self.conj()._v)
        s = v.shape
        # BUGFIX: previously this assigned ``v.shape = (-1, 3)`` which
        # reshaped the *caller's* array in place and never restored it.
        # Use a reshaped view/copy instead, leaving the input untouched.
        fv = v.reshape(-1, 3)
        # f = self * v (with v's scalar part implicitly 1)
        f = np.empty([4, fv.shape[0]], fv.dtype)
        f[0, :] = v1[0] - v1[1] * fv[:, 0] - v1[2] * fv[:, 1] - v1[3] * fv[:, 2]
        f[1, :] = v1[0] * fv[:, 0] + v1[1] + v1[2] * fv[:, 2] - v1[3] * fv[:, 1]
        f[2, :] = v1[0] * fv[:, 1] - v1[1] * fv[:, 2] + v1[2] + v1[3] * fv[:, 0]
        f[3, :] = v1[0] * fv[:, 2] + v1[1] * fv[:, 1] - v1[2] * fv[:, 0] + v1[3]
        # nv = f * conj(self); keep only the vector part.
        nv = np.empty(fv.shape, fv.dtype)
        nv[:, 0] = f[0, :] * v2[1] + f[1, :] * v2[0] + f[2, :] * v2[3] - f[3, :] * v2[2]
        nv[:, 1] = f[0, :] * v2[2] - f[1, :] * v2[3] + f[2, :] * v2[0] + f[3, :] * v2[1]
        nv[:, 2] = f[0, :] * v2[3] + f[1, :] * v2[2] - f[2, :] * v2[1] + f[3, :] * v2[0]
        del f
        # Restore the caller's original shape on the output only.
        return nv.reshape(s)

    def __eq__(self, other):
        """ Returns whether two Quaternions are (numerically) equal """
        return np.allclose(self._v, other._v)

    def __neg__(self):
        """ Returns the negative quaternion """
        q = self.copy()
        q._v = -q._v
        return q

    def __add__(self, other):
        """ Returns the added quantity """
        q = self.copy()
        if isinstance(other, Quaternion):
            q._v += other._v
        else:
            q._v += other
        return q

    def __sub__(self, other):
        """ Returns the subtracted quantity """
        q = self.copy()
        if isinstance(other, Quaternion):
            q._v -= other._v
        else:
            q._v -= other
        return q

    def __mul__(self, other):
        """ Multiplies with another quaternion (Hamilton product) or scalar """
        q = self.copy()
        if isinstance(other, Quaternion):
            v1 = np.copy(self._v)
            v2 = other._v
            q._v[0] = v1[0] * v2[0] - v1[1] * v2[1] - v1[2] * v2[2] - v1[3] * v2[3]
            q._v[1] = v1[0] * v2[1] + v1[1] * v2[0] + v1[2] * v2[3] - v1[3] * v2[2]
            q._v[2] = v1[0] * v2[2] - v1[1] * v2[3] + v1[2] * v2[0] + v1[3] * v2[1]
            q._v[3] = v1[0] * v2[3] + v1[1] * v2[2] - v1[2] * v2[1] + v1[3] * v2[0]
        else:
            q._v *= other
        return q

    def __div__(self, other):
        """ Divides with a scalar """
        if isinstance(other, Quaternion):
            raise ValueError("Do not know how to divide a quaternion " +
                             "with a quaternion.")
        return self * (1. / other)
    __truediv__ = __div__

    def __iadd__(self, other):
        """ In-place addition """
        if isinstance(other, Quaternion):
            self._v += other._v
        else:
            self._v += other
        return self

    def __isub__(self, other):
        """ In-place subtraction """
        if isinstance(other, Quaternion):
            self._v -= other._v
        else:
            self._v -= other
        return self

    # The in-place operators
    def __imul__(self, other):
        """ In-place multiplication """
        if isinstance(other, Quaternion):
            v1 = np.copy(self._v)
            v2 = other._v
            self._v[0] = v1[0] * v2[0] - v1[1] * v2[1] - v1[2] * v2[2] - v1[3] * v2[3]
            self._v[1] = v1[0] * v2[1] + v1[1] * v2[0] + v1[2] * v2[3] - v1[3] * v2[2]
            self._v[2] = v1[0] * v2[2] - v1[1] * v2[3] + v1[2] * v2[0] + v1[3] * v2[1]
            self._v[3] = v1[0] * v2[3] + v1[1] * v2[2] - v1[2] * v2[1] + v1[3] * v2[0]
        else:
            self._v *= other
        return self

    def __idiv__(self, other):
        """ In-place division by a scalar """
        if isinstance(other, Quaternion):
            raise ValueError("Do not know how to divide a quaternion " +
                             "with a quaternion.")
        # use imul
        self._v /= other
        return self
    __itruediv__ = __idiv__
| lgpl-3.0 |
mcgoddard/widgetr | env/Lib/site-packages/pip/compat/__init__.py | 49 | 2996 | """Stuff that differs in different Python versions and platform
distributions."""
from __future__ import absolute_import, division
import os
import imp
import sys
from pip._vendor.six import text_type
try:
from logging.config import dictConfig as logging_dictConfig
except ImportError:
from pip.compat.dictconfig import dictConfig as logging_dictConfig
try:
import ipaddress
except ImportError:
from pip._vendor import ipaddress
__all__ = [
"logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str",
"native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS",
]
uses_pycache = hasattr(imp, 'cache_from_source')
# Version-dependent text helpers: on Python 3, subprocess output is bytes
# and must be decoded; on Python 2 it is already a (byte) str.
if sys.version_info >= (3,):
    def console_to_str(s):
        # Decode console output with stdout's encoding, falling back to
        # UTF-8 when the bytes don't fit that encoding.
        try:
            return s.decode(sys.__stdout__.encoding)
        except UnicodeDecodeError:
            return s.decode('utf_8')

    def native_str(s, replace=False):
        # bytes -> native str; ``replace`` substitutes undecodable bytes.
        if isinstance(s, bytes):
            return s.decode('utf-8', 'replace' if replace else 'strict')
        return s

else:
    def console_to_str(s):
        # Python 2: pass byte strings through unchanged.
        return s

    def native_str(s, replace=False):
        # Replace is ignored -- unicode to UTF-8 can't fail
        if isinstance(s, text_type):
            return s.encode('utf-8')
        return s
def total_seconds(td):
    """Return the duration of *td* in (fractional) seconds.

    Falls back to manual computation on Pythons whose ``timedelta`` lacks
    ``total_seconds`` (added in 2.7).
    """
    try:
        return td.total_seconds()
    except AttributeError:
        microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
        return microseconds / 10 ** 6
def get_path_uid(path):
    """
    Return path's uid.
    Does not follow symlinks:
        https://github.com/pypa/pip/pull/935#discussion_r5307003
    Placed this function in compat due to differences on AIX and
    Jython, that should eventually go away.
    :raises OSError: When path is a symlink or can't be read.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        try:
            file_uid = os.fstat(fd).st_uid
        finally:
            # BUG FIX: the descriptor used to leak if fstat() raised;
            # always release it.
            os.close(fd)
    else:  # AIX and Jython
        # WARNING: time of check vulnerabity, but best we can do w/o NOFOLLOW
        if not os.path.islink(path):
            # older versions of Jython don't have `os.fstat`
            file_uid = os.stat(path).st_uid
        else:
            # raise OSError for parity with os.O_NOFOLLOW above
            raise OSError(
                "%s is a symlink; Will not return uid for symlinks" % path
            )
    return file_uid
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'. this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = ['python', 'wsgiref']
if sys.version_info >= (2, 7):
    # argparse joined the standard library in Python 2.7.
    stdlib_pkgs.extend(['argparse'])
# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
           (sys.platform == 'cli' and os.name == 'nt'))
| mit |
yaoandw/joke | Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/gyptest.py | 525 | 7988 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import optparse
import os
import shlex
import subprocess
import sys
class CommandRunner:
  """
  Executor class for commands, including "commands" implemented by
  Python functions.

  Class attributes:
    verbose: when true, display() echoes each command before it is run.
    active:  when false, execute() is a no-op returning 0 (dry-run mode).
  """
  verbose = True
  active = True
  def __init__(self, dictionary=None):
    # BUG FIX: the old mutable default ({}) was shared across instances;
    # use None as the sentinel and create a fresh dict per instance.
    self.subst_dictionary(dictionary if dictionary is not None else {})
  def subst_dictionary(self, dictionary):
    """Set the dictionary used by subst() for %-substitution."""
    self._subst_dictionary = dictionary
  def subst(self, string, dictionary=None):
    """
    Substitutes (via the format operator) the values in the specified
    dictionary into the specified command.
    The command can be an (action, string) tuple. In all cases, we
    perform substitution on strings and don't worry if something isn't
    a string. (It's probably a Python function to be executed.)
    """
    if dictionary is None:
      dictionary = self._subst_dictionary
    if dictionary:
      try:
        string = string % dictionary
      except TypeError:
        # Non-string "commands" (tuples, callables) pass through untouched.
        pass
    return string
  def display(self, command, stdout=None, stderr=None):
    """Echo *command* to stdout; no-op unless self.verbose."""
    if not self.verbose:
      return
    if type(command) == type(()):
      func = command[0]
      args = command[1:]
      s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
      # BUG FIX: this branch was a plain `if`, so the tuple rendering above
      # was immediately clobbered by the `else` below (which then crashed
      # on s.endswith, tuples having no such method).
      # TODO: quote arguments containing spaces
      # TODO: handle meta characters?
      s = ' '.join(command)
    else:
      s = self.subst(command)
    if not s.endswith('\n'):
      s += '\n'
    sys.stdout.write(s)
    sys.stdout.flush()
  def execute(self, command, stdout=None, stderr=None):
    """
    Executes a single command.

    A string command is %-substituted and split; a leading `cd` becomes an
    os.chdir() call. A tuple is (callable, *args). Anything else is run
    via subprocess; returns the callable's result or the exit status.
    """
    if not self.active:
      return 0
    if type(command) == type(''):
      command = self.subst(command)
      cmdargs = shlex.split(command)
      if cmdargs[0] == 'cd':
        command = (os.chdir,) + tuple(cmdargs[1:])
    if type(command) == type(()):
      func = command[0]
      args = command[1:]
      return func(*args)
    else:
      if stdout is sys.stdout:
        # Same as passing sys.stdout, except python2.4 doesn't fail on it.
        subout = None
      else:
        # Open pipe for anything else so Popen works on python2.4.
        subout = subprocess.PIPE
      if stderr is sys.stderr:
        # Same as passing sys.stderr, except python2.4 doesn't fail on it.
        suberr = None
      elif stderr is None:
        # Merge with stdout if stderr isn't specified.
        suberr = subprocess.STDOUT
      else:
        # Open pipe for anything else so Popen works on python2.4.
        suberr = subprocess.PIPE
      p = subprocess.Popen(command,
                           shell=(sys.platform == 'win32'),
                           stdout=subout,
                           stderr=suberr)
      p.wait()
      if stdout is None:
        self.stdout = p.stdout.read()
      elif stdout is not sys.stdout:
        stdout.write(p.stdout.read())
      if stderr not in (None, sys.stderr):
        stderr.write(p.stderr.read())
      return p.returncode
  def run(self, command, display=None, stdout=None, stderr=None):
    """
    Runs a single command, displaying it first.
    """
    if display is None:
      display = command
    self.display(display)
    return self.execute(command, stdout, stderr)
class Unbuffered:
  """File-object proxy that flushes after every write.

  All other attribute access is delegated to the wrapped stream.
  """
  def __init__(self, fp):
    self.fp = fp
  def write(self, data):
    # Flush immediately so output interleaves correctly with subprocesses.
    self.fp.write(data)
    self.fp.flush()
  def __getattr__(self, name):
    return getattr(self.fp, name)
# Wrap the standard streams so test output is flushed line-by-line and
# interleaves correctly with output from the test subprocesses.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
  """True when *f* is named like a gyp test script (gyptest*.py)."""
  return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
  """Return every gyp test script under *directory*, sorted.

  Subversion metadata directories (.svn) are not descended into.
  """
  found = []
  for root, dirs, files in os.walk(directory):
    if '.svn' in dirs:
      dirs.remove('.svn')
    found += [os.path.join(root, name) for name in files if is_test_name(name)]
  return sorted(found)
def main(argv=None):
  """Command-line entry point: discover and run gyptest-*.py scripts for
  each requested gyp format, then summarize results.
  Returns 1 if any test failed, 0 otherwise.  (Python 2 only: uses print
  statements.)
  """
  if argv is None:
    argv = sys.argv
  usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
  parser = optparse.OptionParser(usage=usage)
  parser.add_option("-a", "--all", action="store_true",
                    help="run all tests")
  parser.add_option("-C", "--chdir", action="store", default=None,
                    help="chdir to the specified directory")
  parser.add_option("-f", "--format", action="store", default='',
                    help="run tests with the specified formats")
  parser.add_option("-G", '--gyp_option', action="append", default=[],
                    help="Add -G options to the gyp command line")
  parser.add_option("-l", "--list", action="store_true",
                    help="list available tests and exit")
  parser.add_option("-n", "--no-exec", action="store_true",
                    help="no execute, just print the command line")
  parser.add_option("--passed", action="store_true",
                    help="report passed tests")
  parser.add_option("--path", action="append", default=[],
                    help="additional $PATH directory")
  parser.add_option("-q", "--quiet", action="store_true",
                    help="quiet, don't print test command lines")
  opts, args = parser.parse_args(argv[1:])
  if opts.chdir:
    os.chdir(opts.chdir)
  if opts.path:
    # Prepend any extra directories to $PATH for the child test processes.
    extra_path = [os.path.abspath(p) for p in opts.path]
    extra_path = os.pathsep.join(extra_path)
    os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
  if not args:
    if not opts.all:
      sys.stderr.write('Specify -a to get all tests.\n')
      return 1
    args = ['test']
  # Expand directory arguments into the gyptest-*.py files they contain.
  tests = []
  for arg in args:
    if os.path.isdir(arg):
      tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
    else:
      if not is_test_name(os.path.basename(arg)):
        print >>sys.stderr, arg, 'is not a valid gyp test name.'
        sys.exit(1)
      tests.append(arg)
  if opts.list:
    for test in tests:
      print test
    sys.exit(0)
  # Configure the shared runner: -q silences echoing, -n makes it a dry run.
  CommandRunner.verbose = not opts.quiet
  CommandRunner.active = not opts.no_exec
  cr = CommandRunner()
  os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
  if not opts.quiet:
    sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
  passed = []
  failed = []
  no_result = []
  if opts.format:
    format_list = opts.format.split(',')
  else:
    # TODO: not duplicate this mapping from pylib/gyp/__init__.py
    format_list = {
      'aix5': ['make'],
      'freebsd7': ['make'],
      'freebsd8': ['make'],
      'openbsd5': ['make'],
      'cygwin': ['msvs'],
      'win32': ['msvs', 'ninja'],
      'linux2': ['make', 'ninja'],
      'linux3': ['make', 'ninja'],
      'darwin': ['make', 'ninja', 'xcode'],
    }[sys.platform]
  for format in format_list:
    # Each test reads the format to exercise from this environment variable.
    os.environ['TESTGYP_FORMAT'] = format
    if not opts.quiet:
      sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
    gyp_options = []
    for option in opts.gyp_option:
      gyp_options += ['-G', option]
    if gyp_options and not opts.quiet:
      sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
    # Run each test script; by convention exit status 2 means "no result".
    for test in tests:
      status = cr.run([sys.executable, test] + gyp_options,
                      stdout=sys.stdout,
                      stderr=sys.stderr)
      if status == 2:
        no_result.append(test)
      elif status:
        failed.append(test)
      else:
        passed.append(test)
  if not opts.quiet:
    def report(description, tests):
      # Print a one-line header followed by the affected test names.
      if tests:
        if len(tests) == 1:
          sys.stdout.write("\n%s the following test:\n" % description)
        else:
          fmt = "\n%s the following %d tests:\n"
          sys.stdout.write(fmt % (description, len(tests)))
        sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
    if opts.passed:
      report("Passed", passed)
    report("Failed", failed)
    report("No result from", no_result)
  if failed:
    return 1
  else:
    return 0
if __name__ == "__main__":
  sys.exit(main())
| mit |
KyleAMoore/KanjiNani | Android/.buildozer/android/platform/build/build/other_builds/kivy-python3crystax-sdl2/armeabi-v7a/kivy/kivy/uix/stacklayout.py | 21 | 11363 | '''
Stack Layout
============
.. only:: html
.. image:: images/stacklayout.gif
:align: right
.. only:: latex
.. image:: images/stacklayout.png
:align: right
.. versionadded:: 1.0.5
The :class:`StackLayout` arranges children vertically or horizontally, as many
as the layout can fit. The size of the individual children widgets do not
have to be uniform.
For example, to display widgets that get progressively larger in width::
root = StackLayout()
for i in range(25):
btn = Button(text=str(i), width=40 + i * 5, size_hint=(None, 0.15))
root.add_widget(btn)
.. image:: images/stacklayout_sizing.png
:align: left
'''
__all__ = ('StackLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, OptionProperty, \
ReferenceListProperty, VariableListProperty
def _compute_size(c, available_size, idx):
sh_min = c.size_hint_min[idx]
sh_max = c.size_hint_max[idx]
val = c.size_hint[idx] * available_size
if sh_min is not None:
if sh_max is not None:
return max(min(sh_max, val), sh_min)
return max(val, sh_min)
if sh_max is not None:
return min(sh_max, val)
return val
class StackLayout(Layout):
    '''Stack layout class. See module documentation for more information.
    '''
    spacing = VariableListProperty([0, 0], length=2)
    '''Spacing between children: [spacing_horizontal, spacing_vertical].
    spacing also accepts a single argument form [spacing].
    :attr:`spacing` is a
    :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0].
    '''
    padding = VariableListProperty([0, 0, 0, 0])
    '''Padding between the layout box and it's children: [padding_left,
    padding_top, padding_right, padding_bottom].
    padding also accepts a two argument form [padding_horizontal,
    padding_vertical] and a single argument form [padding].
    .. versionchanged:: 1.7.0
        Replaced the NumericProperty with a VariableListProperty.
    :attr:`padding` is a
    :class:`~kivy.properties.VariableListProperty` and defaults to
    [0, 0, 0, 0].
    '''
    orientation = OptionProperty('lr-tb', options=(
        'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt', 'bt-lr', 'rl-bt',
        'bt-rl'))
    '''Orientation of the layout.
    :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and
    defaults to 'lr-tb'.
    Valid orientations are 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt',
    'bt-lr', 'rl-bt' and 'bt-rl'.
    .. versionchanged:: 1.5.0
        :attr:`orientation` now correctly handles all valid combinations of
        'lr','rl','tb','bt'. Before this version only 'lr-tb' and
        'tb-lr' were supported, and 'tb-lr' was misnamed and placed
        widgets from bottom to top and from right to left (reversed compared
        to what was expected).
    .. note::
        'lr' means Left to Right.
        'rl' means Right to Left.
        'tb' means Top to Bottom.
        'bt' means Bottom to Top.
    '''
    minimum_width = NumericProperty(0)
    '''Minimum width needed to contain all children. It is automatically set
    by the layout.
    .. versionadded:: 1.0.8
    :attr:`minimum_width` is a :class:`kivy.properties.NumericProperty` and
    defaults to 0.
    '''
    minimum_height = NumericProperty(0)
    '''Minimum height needed to contain all children. It is automatically set
    by the layout.
    .. versionadded:: 1.0.8
    :attr:`minimum_height` is a :class:`kivy.properties.NumericProperty` and
    defaults to 0.
    '''
    minimum_size = ReferenceListProperty(minimum_width, minimum_height)
    '''Minimum size needed to contain all children. It is automatically set
    by the layout.
    .. versionadded:: 1.0.8
    :attr:`minimum_size` is a
    :class:`~kivy.properties.ReferenceListProperty` of
    (:attr:`minimum_width`, :attr:`minimum_height`) properties.
    '''
    def __init__(self, **kwargs):
        super(StackLayout, self).__init__(**kwargs)
        # Re-run do_layout whenever any property affecting geometry changes.
        trigger = self._trigger_layout
        fbind = self.fbind
        fbind('padding', trigger)
        fbind('spacing', trigger)
        fbind('children', trigger)
        fbind('orientation', trigger)
        fbind('size', trigger)
        fbind('pos', trigger)
    def do_layout(self, *largs):
        '''Recompute the size and position of every child.

        Bound to the triggers installed in __init__; ``largs`` is ignored.
        Children are laid out along an inner "u" axis until a row/column is
        full, then wrapped along the outer "v" axis.
        '''
        if not self.children:
            self.minimum_size = (0., 0.)
            return
        # optimize layout by preventing looking at the same attribute in a loop
        selfpos = self.pos
        selfsize = self.size
        orientation = self.orientation.split('-')
        padding_left = self.padding[0]
        padding_top = self.padding[1]
        padding_right = self.padding[2]
        padding_bottom = self.padding[3]
        padding_x = padding_left + padding_right
        padding_y = padding_top + padding_bottom
        spacing_x, spacing_y = self.spacing
        # Determine which direction and in what order to place the widgets
        posattr = [0] * 2
        posdelta = [0] * 2
        posstart = [0] * 2
        for i in (0, 1):
            # posattr[i] is the pos/size index for axis i: 0 for x, 1 for y.
            posattr[i] = 1 * (orientation[i] in ('tb', 'bt'))
            k = posattr[i]
            if orientation[i] == 'lr':
                # left to right
                posdelta[i] = 1
                posstart[i] = selfpos[k] + padding_left
            elif orientation[i] == 'bt':
                # bottom to top
                posdelta[i] = 1
                posstart[i] = selfpos[k] + padding_bottom
            elif orientation[i] == 'rl':
                # right to left
                posdelta[i] = -1
                posstart[i] = selfpos[k] + selfsize[k] - padding_right
            else:
                # top to bottom
                posdelta[i] = -1
                posstart[i] = selfpos[k] + selfsize[k] - padding_top
        # u runs along the filling (inner) axis, v along the wrapping (outer).
        innerattr, outerattr = posattr
        ustart, vstart = posstart
        deltau, deltav = posdelta
        del posattr, posdelta, posstart
        u = ustart  # inner loop position variable
        v = vstart  # outer loop position variable
        # space calculation, used for determining when a row or column is full
        if orientation[0] in ('lr', 'rl'):
            sv = padding_y  # size in v-direction, for minimum_size property
            su = padding_x  # size in h-direction
            spacing_u = spacing_x
            spacing_v = spacing_y
            padding_u = padding_x
            padding_v = padding_y
        else:
            sv = padding_x  # size in v-direction, for minimum_size property
            su = padding_y  # size in h-direction
            spacing_u = spacing_y
            spacing_v = spacing_x
            padding_u = padding_y
            padding_v = padding_x
        # space calculation, row height or column width, for arranging widgets
        lv = 0
        urev = (deltau < 0)
        vrev = (deltav < 0)
        # NOTE(review): firstchild is never used below -- candidate for removal.
        firstchild = self.children[0]
        sizes = []
        lc = []
        # children is stored newest-first; reversed() restores add order.
        for c in reversed(self.children):
            if c.size_hint[outerattr] is not None:
                c.size[outerattr] = max(
                    1, _compute_size(c, selfsize[outerattr] - padding_v,
                                     outerattr))
            # does the widget fit in the row/column?
            ccount = len(lc)
            totalsize = availsize = max(
                0, selfsize[innerattr] - padding_u - spacing_u * ccount)
            if not lc:
                if c.size_hint[innerattr] is not None:
                    childsize = max(1, _compute_size(c, totalsize, innerattr))
                else:
                    childsize = max(0, c.size[innerattr])
                availsize = selfsize[innerattr] - padding_u - childsize
                testsizes = [childsize]
            else:
                # Recompute every queued child's size as if c joined the line.
                testsizes = [0] * (ccount + 1)
                for i, child in enumerate(lc):
                    if availsize <= 0:
                        # no space left but we're trying to add another widget.
                        availsize = -1
                        break
                    if child.size_hint[innerattr] is not None:
                        testsizes[i] = childsize = max(
                            1, _compute_size(child, totalsize, innerattr))
                    else:
                        childsize = max(0, child.size[innerattr])
                        testsizes[i] = childsize
                    availsize -= childsize
                if c.size_hint[innerattr] is not None:
                    testsizes[-1] = max(
                        1, _compute_size(c, totalsize, innerattr))
                else:
                    testsizes[-1] = max(0, c.size[innerattr])
                availsize -= testsizes[-1]
            # Tiny value added in order to avoid issues with float precision
            # causing unexpected children reordering when parent resizes.
            # e.g. if size is 101 and children size_hint_x is 1./5
            # 5 children would not fit in one line because 101*(1./5) > 101/5
            if (availsize + 1e-10) >= 0 or not lc:
                # even if there's no space, we always add one widget to a row
                lc.append(c)
                sizes = testsizes
                lv = max(lv, c.size[outerattr])
                continue
            # apply the sizes
            for i, child in enumerate(lc):
                if child.size_hint[innerattr] is not None:
                    child.size[innerattr] = sizes[i]
            # push the line
            sv += lv + spacing_v
            for c2 in lc:
                if urev:
                    u -= c2.size[innerattr]
                c2.pos[innerattr] = u
                pos_outer = v
                if vrev:
                    # v position is actually the top/right side of the widget
                    # when going from high to low coordinate values,
                    # we need to subtract the height/width from the position.
                    pos_outer -= c2.size[outerattr]
                c2.pos[outerattr] = pos_outer
                if urev:
                    u -= spacing_u
                else:
                    u += c2.size[innerattr] + spacing_u
            v += deltav * lv
            v += deltav * spacing_v
            # c starts the next line.
            lc = [c]
            lv = c.size[outerattr]
            if c.size_hint[innerattr] is not None:
                sizes = [
                    max(1, _compute_size(c, selfsize[innerattr] - padding_u,
                                         innerattr))]
            else:
                sizes = [max(0, c.size[innerattr])]
            u = ustart
        if lc:
            # apply the sizes
            for i, child in enumerate(lc):
                if child.size_hint[innerattr] is not None:
                    child.size[innerattr] = sizes[i]
            # push the last (incomplete) line
            sv += lv + spacing_v
            for c2 in lc:
                if urev:
                    u -= c2.size[innerattr]
                c2.pos[innerattr] = u
                pos_outer = v
                if vrev:
                    pos_outer -= c2.size[outerattr]
                c2.pos[outerattr] = pos_outer
                if urev:
                    u -= spacing_u
                else:
                    u += c2.size[innerattr] + spacing_u
        self.minimum_size[outerattr] = sv
| gpl-3.0 |
pombredanne/wrapt | tests/test_inner_classmethod.py | 1 | 6870 | from __future__ import print_function
import unittest
import inspect
import imp
import wrapt
from compat import PY2, PY3, exec_
DECORATORS_CODE = """
import wrapt
@wrapt.decorator
def passthru_decorator(wrapped, instance, args, kwargs):
return wrapped(*args, **kwargs)
"""
decorators = imp.new_module('decorators')
exec_(DECORATORS_CODE, decorators.__dict__, decorators.__dict__)
class Class(object):
    @classmethod
    def function(self, arg):
        '''documentation'''
        return arg
# Keep a reference to the undecorated class for comparison in the tests.
Original = Class
# Redefine Class with the passthru decorator applied *outside* the
# classmethod decorator -- the combination under test.
class Class(object):
    @decorators.passthru_decorator
    @classmethod
    def function(self, arg):
        '''documentation'''
        return arg
class TestNamingInnerClassMethod(unittest.TestCase):
    """Introspection metadata (__name__, __qualname__, __module__, __doc__,
    argspec, type) of the decorated classmethod must match Original's."""
    def test_class_object_name(self):
        # Test preservation of instance method __name__ attribute.
        self.assertEqual(Class.function.__name__,
                         Original.function.__name__)
    def test_instance_object_name(self):
        # Test preservation of instance method __name__ attribute.
        self.assertEqual(Class().function.__name__,
                         Original().function.__name__)
    def test_class_object_qualname(self):
        # Test preservation of instance method __qualname__ attribute.
        try:
            __qualname__ = Original.original.__qualname__
        except AttributeError:
            # __qualname__ only exists on Python 3.3+; skip silently before.
            pass
        else:
            self.assertEqual(Class.function.__qualname__, __qualname__)
    def test_instance_object_qualname(self):
        # Test preservation of instance method __qualname__ attribute.
        try:
            __qualname__ = Original().original.__qualname__
        except AttributeError:
            pass
        else:
            self.assertEqual(Class().function.__qualname__, __qualname__)
    def test_class_module_name(self):
        # Test preservation of instance method __module__ attribute.
        self.assertEqual(Class.function.__module__,
                         Original.function.__module__)
    def test_instance_module_name(self):
        # Test preservation of instance method __module__ attribute.
        self.assertEqual(Class().function.__module__,
                         Original().function.__module__)
    def test_class_doc_string(self):
        # Test preservation of instance method __doc__ attribute.
        self.assertEqual(Class.function.__doc__,
                         Original.function.__doc__)
    def test_instance_doc_string(self):
        # Test preservation of instance method __doc__ attribute.
        self.assertEqual(Class().function.__doc__,
                         Original().function.__doc__)
    def test_class_argspec(self):
        # Test preservation of instance method argument specification.
        original_argspec = inspect.getargspec(Original.function)
        function_argspec = inspect.getargspec(Class.function)
        self.assertEqual(original_argspec, function_argspec)
    def test_instance_argspec(self):
        # Test preservation of instance method argument specification.
        original_argspec = inspect.getargspec(Original().function)
        function_argspec = inspect.getargspec(Class().function)
        self.assertEqual(original_argspec, function_argspec)
    def test_class_isinstance(self):
        # Test preservation of isinstance() checks.
        self.assertTrue(isinstance(Class.function,
                                   type(Original.function)))
    def test_instance_isinstance(self):
        # Test preservation of isinstance() checks.
        self.assertTrue(isinstance(Class().function,
                                   type(Original().function)))
class TestCallingInnerClassMethod(unittest.TestCase):
    """When the decorated classmethod is called, the wrapper must receive
    instance=<the class> and the positional/keyword arguments unchanged."""
    def test_class_call_function(self):
        # Test calling classmethod.
        _args = (1, 2)
        _kwargs = {'one': 1, 'two': 2}
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            # The wrapper should see the class itself as the instance.
            self.assertEqual(instance, Class)
            self.assertEqual(args, _args)
            self.assertEqual(kwargs, _kwargs)
            return wrapped(*args, **kwargs)
        @_decorator
        def _function(*args, **kwargs):
            return args, kwargs
        class Class(object):
            @_decorator
            @classmethod
            def _function(cls, *args, **kwargs):
                return (args, kwargs)
        result = Class._function(*_args, **_kwargs)
        self.assertEqual(result, (_args, _kwargs))
    def test_instance_call_function(self):
        # Test calling classmethod via class instance.
        _args = (1, 2)
        _kwargs = {'one': 1, 'two': 2}
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            self.assertEqual(instance, Class)
            self.assertEqual(args, _args)
            self.assertEqual(kwargs, _kwargs)
            return wrapped(*args, **kwargs)
        @_decorator
        def _function(*args, **kwargs):
            return args, kwargs
        class Class(object):
            @_decorator
            @classmethod
            def _function(cls, *args, **kwargs):
                return (args, kwargs)
        result = Class()._function(*_args, **_kwargs)
        self.assertEqual(result, (_args, _kwargs))
    def test_class_call_function_nested_decorators(self):
        # Test calling classmethod.
        _args = (1, 2)
        _kwargs = {'one': 1, 'two': 2}
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            self.assertEqual(instance, Class)
            self.assertEqual(args, _args)
            self.assertEqual(kwargs, _kwargs)
            return wrapped(*args, **kwargs)
        @_decorator
        def _function(*args, **kwargs):
            return args, kwargs
        class Class(object):
            @_decorator
            @_decorator
            @classmethod
            def _function(cls, *args, **kwargs):
                return (args, kwargs)
        result = Class._function(*_args, **_kwargs)
        self.assertEqual(result, (_args, _kwargs))
    def test_instance_call_function_nested_decorators(self):
        # Test calling classmethod via class instance.
        _args = (1, 2)
        _kwargs = {'one': 1, 'two': 2}
        @wrapt.decorator
        def _decorator(wrapped, instance, args, kwargs):
            self.assertEqual(instance, Class)
            self.assertEqual(args, _args)
            self.assertEqual(kwargs, _kwargs)
            return wrapped(*args, **kwargs)
        @_decorator
        def _function(*args, **kwargs):
            return args, kwargs
        class Class(object):
            @_decorator
            @_decorator
            @classmethod
            def _function(cls, *args, **kwargs):
                return (args, kwargs)
        result = Class()._function(*_args, **_kwargs)
        self.assertEqual(result, (_args, _kwargs))
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
ssalevan/func | func/yaml/dump.py | 12 | 8711 | """
pyyaml legacy
Copyright (c) 2001 Steve Howell and Friends; All Rights Reserved
(see open source license information in docs/ directory)
"""
import types
import string
from types import StringType, UnicodeType, IntType, FloatType
from types import DictType, ListType, TupleType, InstanceType
from klass import hasMethod, isDictionary
import re
"""
The methods from this module that are exported to the top
level yaml package should remain stable. If you call
directly into other methods of this module, be aware that
they may change or go away in future implementations.
Contact the authors if there are methods in this file
that you wish to remain stable.
"""
def dump(*data):
    """Serialize each object in *data* and return the YAML text."""
    return Dumper().dump(*data)
# Short alias for dumping a single object.
def d(data): return dump(data)
def dumpToFile(file, *data):
    """Serialize each object in *data* as YAML, writing to *file*."""
    return Dumper().dumpToFile(file, *data)
class Dumper:
    """Emitter for the legacy pyyaml dialect.  Prefer the module-level
    dump()/dumpToFile() helpers over instantiating this directly."""
    def __init__(self):
        self.currIndent = "\n"
        self.indent = "  "
        self.keysrt = None
        self.alphaSort = 1 # legacy -- on by default
    def setIndent(self, indent):
        """Set the per-level indent string; returns self for chaining."""
        self.indent = indent
        return self
    def setSort(self, sort_hint):
        """Set the key ordering from a dict/list/callable hint; returns self."""
        self.keysrt = sortMethod(sort_hint)
        return self
    def dump(self, *data):
        """Serialize *data* and return the YAML text as one string."""
        self.result = []
        self.output = self.outputToString
        self.dumpDocuments(data)
        return string.join(self.result,"")
    def outputToString(self, data):
        # output() target installed by dump().
        self.result.append(data)
    def dumpToFile(self, file, *data):
        """Serialize *data*, writing the YAML text to *file*."""
        self.file = file
        self.output = self.outputToFile
        self.dumpDocuments(data)
    def outputToFile(self, data):
        # output() target installed by dumpToFile().
        self.file.write(data)
    def dumpDocuments(self, data):
        """Emit each object in *data* as its own '---' document."""
        for obj in data:
            self.anchors = YamlAnchors(obj)
            self.output("---")
            self.dumpData(obj)
            self.output("\n")
    def indentDump(self, data):
        """Dump *data* one indent level deeper, then restore the indent."""
        oldIndent = self.currIndent
        self.currIndent += self.indent
        self.dumpData(data)
        self.currIndent = oldIndent
    def dumpData(self, data):
        """Dispatch *data* to the appropriate dump*() method by type."""
        anchor = self.anchors.shouldAnchor(data)
        # Disabling anchors because they are lame for strings that the user might want to view/edit -- mdehaan
        #
        #if anchor:
        #    self.output(" &%d" % anchor )
        #else:
        #    anchor = self.anchors.isAlias(data)
        #    if anchor:
        #        self.output(" *%d" % anchor )
        #        return
        if (data is None):
            self.output(' ~')
        elif hasMethod(data, 'to_yaml'):
            self.dumpTransformedObject(data)
        elif hasMethod(data, 'to_yaml_implicit'):
            self.output(" " + data.to_yaml_implicit())
        elif type(data) is InstanceType:
            self.dumpRawObject(data)
        elif isDictionary(data):
            self.dumpDict(data)
        elif type(data) in [ListType, TupleType]:
            self.dumpList(data)
        else:
            self.dumpScalar(data)
    def dumpTransformedObject(self, data):
        """Dump an object that customizes serialization via to_yaml()."""
        obj_yaml = data.to_yaml()
        if type(obj_yaml) is not TupleType:
            self.raiseToYamlSyntaxError()
        (data, typestring) = obj_yaml
        if typestring:
            self.output(" " + typestring)
        self.dumpData(data)
    def dumpRawObject(self, data):
        """Dump an arbitrary instance as !!module.Class plus its __dict__."""
        self.output(' !!%s.%s' % (data.__module__, data.__class__.__name__))
        self.dumpData(data.__dict__)
    def dumpDict(self, data):
        """Dump a mapping, honouring the configured key ordering."""
        keys = data.keys()
        if len(keys) == 0:
            self.output(" {}")
            return
        if self.keysrt:
            keys = sort_keys(keys,self.keysrt)
        else:
            if self.alphaSort:
                keys.sort()
        for key in keys:
            self.output(self.currIndent)
            self.dumpKey(key)
            self.output(":")
            self.indentDump(data[key])
    def dumpKey(self, key):
        """Dump a mapping key; tuple keys use the '?' complex-key form."""
        if type(key) is TupleType:
            self.output("?")
            self.indentDump(key)
            self.output("\n")
        else:
            self.output(quote(key))
    def dumpList(self, data):
        """Dump a sequence as '-' entries ('[]' when empty)."""
        if len(data) == 0:
            self.output(" []")
            return
        for item in data:
            self.output(self.currIndent)
            self.output("-")
            self.indentDump(item)
    def dumpScalar(self, data):
        """Dump a scalar, choosing unicode, block, or quoted form."""
        if isUnicode(data):
            self.output(' "%s"' % repr(data)[2:-1])
        elif isMulti(data):
            self.dumpMultiLineScalar(data.splitlines())
        else:
            self.output(" ")
            self.output(quote(data))
    def dumpMultiLineScalar(self, lines):
        """Dump a multi-line string using the '|' literal block style."""
        self.output(" |")
        if lines[-1] == "":
            # Trailing newline present: keep it via the '+' chomp indicator.
            self.output("+")
        for line in lines:
            self.output(self.currIndent)
            self.output(line)
    def raiseToYamlSyntaxError(self):
        # BUG FIX: this used to `raise` a bare string, which has been
        # illegal since Python 2.6 (it raised a TypeError that lost the
        # message).  Raise a real exception carrying the same message.
        raise TypeError("""
    to_yaml should return tuple w/object to dump
    and optional YAML type. Example:
    ({'foo': 'bar'}, '!!foobar')
    """)
#### ANCHOR-RELATED METHODS
def accumulate(obj, occur):
    """Recursively count, in *occur* (id -> count), how many times each
    anchorable object appears inside *obj*.

    None, numbers and short strings are never anchored, so they are
    skipped outright.
    """
    typ = type(obj)
    if obj is None or typ is IntType or typ is FloatType:
        return
    if (typ is StringType or typ is UnicodeType) and len(obj) < 32:
        return
    obid = id(obj)
    if occur.get(obid, 0) == 0:
        # First sighting: record it and recurse into containers.
        occur[obid] = 1
        if typ is ListType:
            for item in obj:
                accumulate(item, occur)
        if typ is DictType:
            for (key, value) in obj.items():
                accumulate(key, occur)
                accumulate(value, occur)
    else:
        occur[obid] = occur[obid] + 1
class YamlAnchors:
    """Track which objects occur more than once in a document so they can
    be emitted as YAML anchors (&n) and aliases (*n)."""
    def __init__(self, data):
        counts = {}
        accumulate(data, counts)
        # Only objects seen more than once are candidates for anchoring;
        # value 0 means "not yet assigned an anchor number".
        visits = {}
        for (obid, count) in counts.items():
            if count > 1:
                visits[obid] = 0
        self._anchorVisits = visits
        self._currentAliasIndex = 0
    def shouldAnchor(self, obj):
        """Return a fresh anchor number the first time a shared object is
        dumped; 0 for unshared or already-anchored objects."""
        status = self._anchorVisits.get(id(obj), None)
        if status == 0:
            self._currentAliasIndex = self._currentAliasIndex + 1
            status = self._currentAliasIndex
            self._anchorVisits[id(obj)] = status
            return status
        return 0
    def isAlias(self, obj):
        """Anchor number previously assigned to *obj*, or 0."""
        return self._anchorVisits.get(id(obj), 0)
### SORTING METHODS
def sort_keys(keys, fn):
    """Return *keys* ordered by fn(key); a None rank sorts as '~'
    (ties broken by the key itself)."""
    decorated = []
    for key in keys:
        rank = fn(key)
        if rank is None:
            rank = '~'
        decorated.append((rank, key))
    decorated.sort()
    return [key for (rank, key) in decorated]
def sortMethod(sort_hint):
    """Turn a sort hint into a callable mapping key -> rank.

    A dict hint supplies ranks directly, a list/tuple supplies an explicit
    ordering, and anything else is assumed to already be a key function.
    """
    typ = type(sort_hint)
    if typ == DictType:
        return sort_hint.get
    if typ == ListType or typ == TupleType:
        ranks = {}
        position = 0
        for item in sort_hint:
            ranks[item] = position
            position += 1
        return ranks.get
    return sort_hint
### STRING QUOTING AND SCALAR HANDLING
def isStr(data):
    """1 when *data* is a plain or unicode string, else 0.

    (XXX 2.1 madness: written via type() comparisons for old interpreters.)
    """
    for stringType in (type(''), type(u'')):
        if type(data) == stringType:
            return 1
    return 0
def doubleUpQuotes(data):
    """Escape single quotes YAML-style by doubling them."""
    return "''".join(data.split("'"))
def quote(data):
    """Return *data* rendered as a YAML scalar, single- or double-quoting
    it as its content requires."""
    if not isStr(data):
        return str(data)
    if not data:
        return "''"
    quoteChar = ''
    if hasSpecialChar(data) or data[0] == "'":
        # repr() escapes the awkward characters; strip its outer quotes,
        # then translate repr's \x08 back into the YAML \b escape.
        data = repr(data)[1:-1]
        data = data.replace(r"\x08", r"\b")
        quoteChar = '"'
    elif needsSingleQuote(data):
        quoteChar = "'"
        data = doubleUpQuotes(data)
    return "%s%s%s" % (quoteChar, data, quoteChar)
def needsSingleQuote(data):
    """Truthy when *data* would be misread as non-string YAML unless it is
    single-quoted (leading digit/sign, alias marker, anchor, stray quote,
    surrounding spaces, colons, or dotted-number forms)."""
    if (re.match(r"^-?\d", data) or re.match(r"\*\S", data)
            or data[0] in ('&', ' ', '"') or data[-1] == ' '):
        return 1
    return re.search(r'[:]', data) or re.search(r'(\d\.){2}', data)
def hasSpecialChar(data):
    """Truthy (a match object) when *data* contains a character that forces
    double-quoting: tab, backspace, CR, form feed, or '#'."""
    # need test to drive out '#' from this
    return re.search('[\t\b\r\f#]', data)
def isMulti(data):
    """Truthy when *data* is a plain multi-line string suitable for the
    YAML '|' block style (no special characters)."""
    if not isStr(data) or hasSpecialChar(data):
        return 0
    return re.search("\n", data)
def isUnicode(data):
    """True only for unicode objects (Python 2 semantics)."""
    return type(data) == unicode
def sloppyIsUnicode(data):
    """Heuristic unicode check via repr() for ancient interpreters.

    (XXX - hack to make tests pass for 2.1.)
    """
    shown = repr(data)
    return shown[:2] == "u'" and shown != data
import sys
# Python < 2.2 cannot compare types the way isUnicode() does; substitute
# the repr()-based heuristic there.
if sys.hexversion < 0x20200000:
    isUnicode = sloppyIsUnicode
| gpl-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/urllib3/exceptions.py | 87 | 6604 | from __future__ import absolute_import
from .packages.six.moves.http_client import (
IncompleteRead as httplib_IncompleteRead
)
# Base Exceptions
class HTTPError(Exception):
    "Base exception used by this module."
    pass
class HTTPWarning(Warning):
    "Base warning used by this module."
    pass
class PoolError(HTTPError):
    "Base exception for errors caused within a pool."
    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))
    def __reduce__(self):
        # For pickling purposes.
        # The pool object itself is not picklable, so it is dropped here.
        return self.__class__, (None, None)
class RequestError(PoolError):
    "Base exception for PoolErrors that have associated URLs."
    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)
    def __reduce__(self):
        # For pickling purposes.
        # Keep the URL (a plain string) but drop the unpicklable pool.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    "Raised when SSL certificate fails in an HTTPS connection."
    pass
class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass
class DecodeError(HTTPError):
    "Raised when automatic decoding based on Content-Type fails."
    pass
class ProtocolError(HTTPError):
    "Raised when something unexpected happens mid-request/response."
    pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class NewConnectionError(ConnectTimeoutError, PoolError):
"Raised when we fail to establish a new connection. Usually ECONNREFUSED."
pass
class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass
class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass
# Inherits ValueError first so `except ValueError` in user code still works.
class LocationValueError(ValueError, HTTPError):
    "Raised when there is something wrong with a given URL input."
    pass
class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""
    def __init__(self, location):
        self.location = location
        # Deliberately calls HTTPError.__init__ directly (not super()),
        # exactly as the original did, so only the message is passed up.
        HTTPError.__init__(self, "Failed to parse: %s" % location)
class ResponseError(HTTPError):
    "Used as a container for an error reason supplied in a MaxRetryError."
    # Message templates; SPECIFIC_ERROR expects .format(status_code=...).
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
    "Warned when performing security reducing actions"
    pass
class SubjectAltNameWarning(SecurityWarning):
    "Warned when connecting to a host with a certificate missing a SAN."
    pass
class InsecureRequestWarning(SecurityWarning):
    "Warned when making an unverified HTTPS request."
    pass
class SystemTimeWarning(SecurityWarning):
    "Warned when system time is suspected to be wrong"
    pass
class InsecurePlatformWarning(SecurityWarning):
    "Warned when certain SSL configuration is not available on a platform."
    pass
class SNIMissingWarning(HTTPWarning):
    "Warned when making a HTTPS request without SNI available."
    pass
class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
    pass
# Also a ValueError so generic value-validation handlers catch it.
class ResponseNotChunked(ProtocolError, ValueError):
    "Response needs to be chunked in order to read it as chunks."
    pass
class BodyNotHttplibCompatible(HTTPError):
    """
    Body should be httplib.HTTPResponse like (have an fp attribute which
    returns raw chunks) for read_chunked().
    """
    pass
class IncompleteRead(HTTPError, httplib_IncompleteRead):
    """
    Response length doesn't match expected Content-Length
    Subclass of http_client.IncompleteRead to allow int value
    for `partial` to avoid creating large objects on streamed
    reads.
    """
    def __init__(self, partial, expected):
        super(IncompleteRead, self).__init__(partial, expected)
    def __repr__(self):
        # self.partial / self.expected are stored by the httplib base class'
        # __init__, which the constructor above delegates to.
        return ('IncompleteRead(%i bytes read, '
                '%i more expected)' % (self.partial, self.expected))
class InvalidHeader(HTTPError):
    "The header provided was somehow invalid."
    pass
class ProxySchemeUnknown(AssertionError, ValueError):
    """ProxyManager does not support the supplied scheme."""
    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
    def __init__(self, scheme):
        # Build the message inline; same text, same single-argument init.
        super(ProxySchemeUnknown, self).__init__(
            "Not supported proxy scheme %s" % scheme)
class HeaderParsingError(HTTPError):
    """Raised by assert_header_parsing, but we convert it to a log.warning statement."""
    def __init__(self, defects, unparsed_data):
        # Fall back to 'Unknown' when no defect list was supplied.
        described = defects or 'Unknown'
        super(HeaderParsingError, self).__init__(
            '%s, unparsed data: %r' % (described, unparsed_data))
# NOTE(review): presumably raised when a request body stream cannot be
# re-seeked for a retry/redirect — confirm against call sites.
class UnrewindableBodyError(HTTPError):
    "urllib3 encountered an error when trying to rewind a body"
    pass
| gpl-3.0 |
ContinuumBridge/adaptor_test_app | adaptor_test_app.py | 1 | 1179 | #!/usr/bin/env python
# adaptor_test_app.py
"""
Copyright (c) 2014 ContinuumBridge Limited
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
from adaptor_test_app_a import App
# Entry point: hand the raw command line to App.
# NOTE(review): App presumably starts the adaptor test application from its
# constructor — confirm in adaptor_test_app_a.
App(sys.argv)
| mit |
robert-budde/smarthome | lib/env/stat.py | 2 | 2025 | # System logic 'stat' of SmartHomeNG
#
# schedule is defined in lib.env.logic.yaml
#
import os
import sys
import shutil
import ephem
import psutil
# NOTE(review): this is a SmartHomeNG env logic script — the names `sh`,
# `logic`, `logger`, `gc` and `threading` are not imported here and are
# presumably injected into the logic's namespace by the framework; confirm.
# Publish the ephem library version once the item exists.
if sh.env.system.libs.ephem_version is not None:
    sh.env.system.libs.ephem_version(ephem.__version__, logic.lname)
# lib/env/statistic.py
# Garbage
gc.collect()
if gc.garbage != []:
    # Uncollectable objects found: record, log and drop the references.
    sh.env.core.garbage(len(gc.garbage), logic.lname)
    logger.warning("Garbage: {} objects".format(len(gc.garbage)))
    logger.info("Garbage: {}".format(gc.garbage))
    del gc.garbage[:]
# Threads
sh.env.core.threads(threading.activeCount(), logic.lname)
# Scheduler: Worker threads
sh.env.core.scheduler.worker_threads(sh.scheduler.get_worker_count(), logic.lname)
sh.env.core.scheduler.idle_threads(sh.scheduler.get_idle_worker_count(), logic.lname)
sh.env.core.scheduler.worker_names(sh.scheduler.get_worker_names(), logic.lname)
# Memory
# Resident set size of this SmartHomeNG process.
p = psutil.Process(os.getpid())
mem_info = p.memory_info()
mem = mem_info.rss
sh.env.core.memory(mem, logic.lname)
# System Memory
swap_info = psutil.swap_memory()
sh.env.system.swap(swap_info.used, logic.lname)
sysmem_info = psutil.virtual_memory()
sh.env.system.memory.used(sysmem_info.used, logic.lname)
sh.env.system.memory.percent(sysmem_info.percent, logic.lname)
# Load
# Only the 5-minute average is published; l1/l15 are ignored.
l1, l5, l15 = os.getloadavg()
sh.env.system.load(round(l5, 2), logic.lname)
# Diskusage
# shutil.disk_usage() is only available on Python 3.3+.
if sys.version_info > (3, 3):
    #pathname = os.path.dirname(sys.argv[0])
    absolute_pathname = sh.get_basedir()
    #du = shutil.disk_usage(os.path.abspath(pathname))
    try:
        du = shutil.disk_usage(absolute_pathname)
        sh.env.system.diskfree(du.free, logic.lname)
        sh.env.system.disksize(du.total, logic.lname)
        sh.env.system.diskusage(du.used, logic.lname)
        sh.env.system.diskusagepercent(round(du.used / du.total * 100.0, 2), logic.lname)
    except:
        # NOTE(review): bare except — also swallows KeyboardInterrupt/SystemExit.
        logger.error("Statistics could not be read using base directory {}".format(absolute_pathname))
if sh.moon:
    sh.env.location.moonlight(sh.moon.light(), logic.lname)
| gpl-3.0 |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/lib2to3/patcomp.py | 304 | 7091 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Pattern compiler.
The grammer is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import StringIO
# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar
# Really local imports
from . import pytree
from . import pygram
# The pattern grammar file (shipped alongside this module).
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     "PatternGrammar.txt")
class PatternSyntaxError(Exception):
    """Raised when a pattern string cannot be parsed or compiled."""
    pass
def tokenize_wrapper(input):
    """Tokenizes a string suppressing significant whitespace.

    Yields the raw 5-tuples produced by tokenize.generate_tokens(),
    dropping NEWLINE/INDENT/DEDENT tokens so pattern layout is free-form.
    """
    skip = frozenset((token.NEWLINE, token.INDENT, token.DEDENT))
    tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
    for quintuple in tokens:
        # Test the token type by index instead of unpacking into a local
        # named `type`, which shadowed the builtin (and left three other
        # unpacked names unused).
        if quintuple[0] not in skip:
            yield quintuple
class PatternCompiler(object):
    # Compiles pattern strings (PatternGrammar) into pytree.*Pattern objects.
    def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
        """Initializer.
        Takes an optional alternative filename for the pattern grammar.
        """
        self.grammar = driver.load_grammar(grammar_file)
        self.syms = pygram.Symbols(self.grammar)
        # Python grammar/symbols are needed to resolve NAME nodes that refer
        # to Python nonterminals (see compile_basic).
        self.pygrammar = pygram.python_grammar
        self.pysyms = pygram.python_symbols
        self.driver = driver.Driver(self.grammar, convert=pattern_convert)
    def compile_pattern(self, input, debug=False, with_tree=False):
        """Compiles a pattern string to a nested pytree.*Pattern object.

        Raises PatternSyntaxError on a parse failure.  With with_tree=True,
        returns a (pattern, parse_tree) pair.
        """
        tokens = tokenize_wrapper(input)
        try:
            root = self.driver.parse_tokens(tokens, debug=debug)
        except parse.ParseError as e:
            raise PatternSyntaxError(str(e))
        if with_tree:
            return self.compile_node(root), root
        else:
            return self.compile_node(root)
    def compile_node(self, node):
        """Compiles a node, recursively.
        This is one big switch on the node type.
        """
        # XXX Optimize certain Wildcard-containing-Wildcard patterns
        # that can be merged
        if node.type == self.syms.Matcher:
            node = node.children[0] # Avoid unneeded recursion
        if node.type == self.syms.Alternatives:
            # Skip the odd children since they are just '|' tokens
            alts = [self.compile_node(ch) for ch in node.children[::2]]
            if len(alts) == 1:
                return alts[0]
            # min=max=1 wildcard over the alternatives == "exactly one of".
            p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
            return p.optimize()
        if node.type == self.syms.Alternative:
            # A sequence of units that must all match in order.
            units = [self.compile_node(ch) for ch in node.children]
            if len(units) == 1:
                return units[0]
            p = pytree.WildcardPattern([units], min=1, max=1)
            return p.optimize()
        if node.type == self.syms.NegatedUnit:
            # children[0] is the 'not' marker; compile the rest and negate.
            pattern = self.compile_basic(node.children[1:])
            p = pytree.NegatedPattern(pattern)
            return p.optimize()
        assert node.type == self.syms.Unit
        name = None
        nodes = node.children
        # Optional "name=" binding prefix.
        if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
            name = nodes[0].value
            nodes = nodes[2:]
        # Optional trailing repeater: *, + or {m[,n]}.
        repeat = None
        if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
            repeat = nodes[-1]
            nodes = nodes[:-1]
        # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
        pattern = self.compile_basic(nodes, repeat)
        if repeat is not None:
            assert repeat.type == self.syms.Repeater
            children = repeat.children
            child = children[0]
            if child.type == token.STAR:
                min = 0
                max = pytree.HUGE
            elif child.type == token.PLUS:
                min = 1
                max = pytree.HUGE
            elif child.type == token.LBRACE:
                # {m} has 3 children ('{' NUMBER '}'), {m,n} has 5.
                assert children[-1].type == token.RBRACE
                assert len(children) in (3, 5)
                min = max = self.get_int(children[1])
                if len(children) == 5:
                    max = self.get_int(children[3])
            else:
                assert False
            if min != 1 or max != 1:
                # Only wrap when the repeat actually changes multiplicity.
                pattern = pattern.optimize()
                pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
        if name is not None:
            pattern.name = name
        return pattern.optimize()
    def compile_basic(self, nodes, repeat=None):
        # Compile STRING | NAME [Details] | (...) | [...]
        assert len(nodes) >= 1
        node = nodes[0]
        if node.type == token.STRING:
            # NOTE(review): `unicode` is Python-2-only; this module targets
            # the py2 stdlib.
            value = unicode(literals.evalString(node.value))
            return pytree.LeafPattern(_type_of_literal(value), value)
        elif node.type == token.NAME:
            value = node.value
            if value.isupper():
                # Upper-case names are token names (see TOKEN_MAP).
                if value not in TOKEN_MAP:
                    raise PatternSyntaxError("Invalid token: %r" % value)
                if nodes[1:]:
                    raise PatternSyntaxError("Can't have details for token")
                return pytree.LeafPattern(TOKEN_MAP[value])
            else:
                if value == "any":
                    type = None
                elif not value.startswith("_"):
                    type = getattr(self.pysyms, value, None)
                    if type is None:
                        raise PatternSyntaxError("Invalid symbol: %r" % value)
                # NOTE(review): a name starting with "_" (other than "any")
                # leaves `type` unbound here — presumably ruled out by the
                # pattern grammar; confirm.
                if nodes[1:]: # Details present
                    content = [self.compile_node(nodes[1].children[1])]
                else:
                    content = None
                return pytree.NodePattern(type, content)
        elif node.value == "(":
            # Parenthesized sub-pattern: just compile the inside.
            return self.compile_node(nodes[1])
        elif node.value == "[":
            # [...] is an optional group; an explicit repeat is not allowed.
            assert repeat is None
            subpattern = self.compile_node(nodes[1])
            return pytree.WildcardPattern([[subpattern]], min=0, max=1)
        assert False, node
    def get_int(self, node):
        # Convert a NUMBER leaf into a Python int.
        assert node.type == token.NUMBER
        return int(node.value)
# Map named tokens to the type value for a LeafPattern
# ("TOKEN" maps to None, i.e. a LeafPattern matching any token type).
TOKEN_MAP = {"NAME": token.NAME,
             "STRING": token.STRING,
             "NUMBER": token.NUMBER,
             "TOKEN": None}
def _type_of_literal(value):
if value[0].isalpha():
return token.NAME
elif value in grammar.opmap:
return grammar.opmap[value]
else:
return None
def pattern_convert(grammar, raw_node_info):
    """Converts raw node information to a Node or Leaf instance."""
    # Avoid unpacking into a local named `type` (shadows the builtin).
    node_type, value, context, children = raw_node_info
    if not children and node_type not in grammar.number2symbol:
        return pytree.Leaf(node_type, value, context=context)
    return pytree.Node(node_type, children, context=context)
def compile_pattern(pattern):
    """Convenience wrapper: compile *pattern* with a default PatternCompiler."""
    compiler = PatternCompiler()
    return compiler.compile_pattern(pattern)
| apache-2.0 |
zhangjiajie/PTP | nexus/test/test_regressions.py | 3 | 5632 | """Regression Tests"""
import os
import re
import unittest
from nexus import NexusReader
from nexus.reader import GenericHandler, DataHandler, TreeHandler
# Fixture locations, resolved relative to this test module.
EXAMPLE_DIR = os.path.join(os.path.dirname(__file__), '../examples')
REGRESSION_DIR = os.path.join(os.path.dirname(__file__), 'regression')
class Test_DataHandler_Regression_WhitespaceInMatrix(unittest.TestCase):
    """Regression: Test that leading whitespace in a data matrix is parsed ok"""
    def test_regression(self):
        nex = NexusReader(os.path.join(REGRESSION_DIR, 'white_space_in_matrix.nex'))
        # 2 characters and 4 taxa expected from the fixture file.
        assert nex.blocks['data'].nchar == 2
        assert nex.blocks['data'].matrix['Harry'] == ['0', '0']
        assert nex.blocks['data'].matrix['Simon'] == ['0', '1']
        assert nex.blocks['data'].matrix['Betty'] == ['1', '0']
        assert nex.blocks['data'].matrix['Louise'] == ['1', '1']
class Test_TreeHandler_Regression_RandomAPETrees(unittest.TestCase):
    """Regression: Test that we can parse randomly generated APE/R trees"""
    def test_regression(self):
        nex = NexusReader(os.path.join(REGRESSION_DIR, 'ape_random.trees'))
        # Fixture contains exactly two trees.
        assert nex.trees.ntrees == 2
class Test_TreeHandler_Regression_BadCharsInTaxaName(unittest.TestCase):
    """Regression: taxa names containing awkward characters parse cleanly."""
    def test_regression(self):
        nex = NexusReader(os.path.join(REGRESSION_DIR, 'bad_chars_in_taxaname.trees'))
        # did we get a tree block?
        assert 'trees' in nex.blocks
        # did we find 1 tree? (old comment said 3, but the assertion checks 1)
        assert len(nex.blocks['trees'].trees) == 1 == nex.blocks['trees'].ntrees
        # did we get the translation parsed properly.
        assert nex.trees.was_translated == True
        assert len(nex.trees.translators) == 5 # 5 taxa in example trees
        # check last entry
        assert nex.trees.translators['5'] == 'PALAUNGWA_De.Ang'
        # check detranslate
        nex.trees.detranslate()
        assert '(MANGIC_Bugan,MANGIC_Paliu,MANGIC_Mang,PALAUNGWA_Danaw,PALAUNGWA_De.Ang)' in nex.trees[0]
class Test_TaxaHandler_Regression_Mesquite(unittest.TestCase):
    """Regression: Test that we can parse MESQUITE taxa blocks"""
    def setUp(self):
        self.nex = NexusReader(os.path.join(REGRESSION_DIR, 'mesquite_taxa_block.nex'))
    def test_taxa_block(self):
        for taxon in ['A', 'B', 'C']:
            assert taxon in self.nex.taxa
        # did we get the right number of taxa in the matrix?
        assert self.nex.taxa.ntaxa == len(self.nex.taxa.taxa) == 3
    def test_taxa_block_attributes(self):
        assert 'taxa' in self.nex.blocks
        assert len(self.nex.taxa.attributes) == 1
        assert 'TITLE Untitled_Block_of_Taxa;' in self.nex.taxa.attributes
    def test_write(self):
        # Raw strings: the old non-raw literals relied on '\s'/'\[' being
        # passed through unchanged, which raises invalid-escape warnings on
        # modern Python 3.  The runtime values are identical.
        expected_patterns = [
            r'^begin taxa;$',
            r'^\s+TITLE Untitled_Block_of_Taxa;$',
            r'^\s+dimensions ntax=3;$',
            r'^\s+taxlabels$',
            r"^\s+\[1\] 'A'$",
            r"^\s+\[2\] 'B'$",
            r"^\s+\[3\] 'C'$",
            r'^;$',
            r'^end;$',
        ]
        written = self.nex.write()
        for expected in expected_patterns:
            assert re.search(expected, written, re.MULTILINE), 'Expected "%s"' % expected
class Test_DataHandler_Regression_Mesquite(unittest.TestCase):
    """Regression: Test that we can parse MESQUITE data blocks"""
    def setUp(self):
        self.nex = NexusReader()
        self.nex.read_string("""
        #NEXUS
        Begin data;
        TITLE something;
        Dimensions ntax=2 nchar=2;
        Format datatype=standard symbols="01" gap=-;
        Matrix
        Harry 00
        Simon 01
        ;
        End;
        """)
    def test_attr_find(self):
        assert len(self.nex.data.attributes) == 1
    def test_write(self):
        # Raw strings: avoids invalid '\s' escape warnings on Python 3;
        # runtime values are unchanged.
        expected_patterns = [
            r'^begin data;$',
            r'^\s+TITLE something;$',
            r'^\s+dimensions ntax=2 nchar=2;$',
            r'^\s+format datatype=standard symbols="01" gap=-;$',
            r"^matrix$",
            r"^Harry\s+00",
            r"^Simon\s+01$",
            r'^\s+;$',
            r'^end;$',
        ]
        written = self.nex.write()
        for expected in expected_patterns:
            assert re.search(expected, written, re.MULTILINE), 'Expected "%s"' % expected
class Test_TreeHandler_Regression_Mesquite(unittest.TestCase):
    """Regression: Test that we can parse MESQUITE taxa blocks"""
    def setUp(self):
        self.nex = NexusReader(os.path.join(REGRESSION_DIR, 'mesquite_formatted_branches.trees'))
    def test_attributes(self):
        # Two block-level attributes: a Title and a LINK to the taxa block.
        assert len(self.nex.trees.attributes) == 2
        assert self.nex.trees.attributes[0] == """Title 'Trees from "temp.trees"';"""
        assert self.nex.trees.attributes[1] == """LINK Taxa = Untitled_Block_of_Taxa;"""
    def test_found_trees(self):
        assert self.nex.trees.ntrees == 1
    def test_found_taxa(self):
        assert len(self.nex.trees.taxa) == 3
        assert 'A' in self.nex.trees.taxa
        assert 'B' in self.nex.trees.taxa
        assert 'C' in self.nex.trees.taxa
    def test_was_translated(self):
        assert self.nex.trees.was_translated == True
    def test_translation(self):
        # Translate table maps numeric ids to taxon labels.
        assert self.nex.trees.translators['1'] == 'A'
        assert self.nex.trees.translators['2'] == 'B'
        assert self.nex.trees.translators['3'] == 'C'
    def test_write(self):
        written = self.nex.write()
        assert """Title 'Trees from "temp.trees"';""" in written
        assert """LINK Taxa = Untitled_Block_of_Taxa;""" in written
| gpl-3.0 |
ayushagrawal288/zamboni | sites/dev/settings_base.py | 6 | 5379 | """private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from mkt.settings import (CACHE_PREFIX, ES_INDEXES,
KNOWN_PROXIES, LOGGING, HOSTNAME)
from .. import splitstrip
import private_base as private
# --- Hosts / email / debug flags -------------------------------------------
ALLOWED_HOSTS = ['.allizom.org', '.mozflare.net']
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
ADMINS = ()
# --- Databases: master ('default') plus one read slave ----------------------
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = 5 * 60  # 5m for persistent connections.
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
DATABASES['slave']['ATOMIC_REQUESTS'] = True
DATABASES['slave']['CONN_MAX_AGE'] = 5 * 60  # 5m for persistent connections.
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
SLAVE_DATABASES = ['slave']
# --- Cache -------------------------------------------------------------------
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
        'TIMEOUT': 500,
        'KEY_PREFIX': CACHE_PREFIX,
    }
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
# Celery
BROKER_URL = private.BROKER_URL
CELERY_ALWAYS_EAGER = False
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
# --- File-storage paths on the shared NetApp volume --------------------------
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
WEBSITE_ICONS_PATH = UPLOADS_PATH + '/website_icons'
FEATURED_APP_BG_PATH = UPLOADS_PATH + '/featured_app_background'
FEED_COLLECTION_BG_PATH = UPLOADS_PATH + '/feed_collection_background'
FEED_SHELF_BG_PATH = UPLOADS_PATH + '/feed_shelf_background'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
WEBAPP_PROMO_IMG_PATH = UPLOADS_PATH + '/webapp_promo_imgs'
WEBSITE_PROMO_IMG_PATH = UPLOADS_PATH + '/website_promo_imgs'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
# --- Logging levels for noisy third-party / internal loggers -----------------
LOGGING['loggers'].update({
    'amqp': {'level': logging.WARNING},
    'raven': {'level': logging.WARNING},
    'requests': {'level': logging.WARNING},
    'z.addons': {'level': logging.DEBUG},
    'z.elasticsearch': {'level': logging.DEBUG},
    'z.pool': {'level': logging.ERROR},
    'z.task': {'level': logging.DEBUG},
    'z.users': {'level': logging.DEBUG},
})
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
SPIDERMONKEY = '/usr/bin/tracemonkey'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'mkt-dev'
# --- Elasticsearch -----------------------------------------------------------
ES_DEFAULT_NUM_REPLICAS = 2
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
# Suffix every index name with "_dev" for this environment.
ES_INDEXES = dict((k, '%s_dev' % v) for k, v in ES_INDEXES.items())
# --- statsd metrics ----------------------------------------------------------
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = False
# Load-balancer / proxy addresses trusted for X-Forwarded-For handling.
KNOWN_PROXIES += ['10.2.83.105',
                  '10.2.83.106',
                  '10.2.83.107',
                  '10.8.83.200',
                  '10.8.83.201',
                  '10.8.83.202',
                  '10.8.83.203',
                  '10.8.83.204',
                  '10.8.83.210',
                  '10.8.83.211',
                  '10.8.83.212',
                  '10.8.83.213',
                  '10.8.83.214',
                  '10.8.83.215',
                  '10.8.83.251',
                  '10.8.83.252',
                  '10.8.83.253',
                  ]
NEW_FEATURES = True
# --- Asset pipeline binaries -------------------------------------------------
CLEANCSS_BIN = 'cleancss'
LESS_BIN = 'lessc'
STYLUS_BIN = 'stylus'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 540
VALIDATOR_TIMEOUT = 180
LESS_PREPROCESS = True
XSENDFILE = True
ALLOW_SELF_REVIEWS = True
# --- External services / credentials ----------------------------------------
GOOGLE_ANALYTICS_CREDENTIALS = private.GOOGLE_ANALYTICS_CREDENTIALS
GOOGLE_API_CREDENTIALS = private.GOOGLE_API_CREDENTIALS
MONOLITH_SERVER = 'https://monolith-dev.allizom.org'
GEOIP_URL = 'https://geo-dev-marketplace.allizom.org'
AWS_ACCESS_KEY_ID = private.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = private.AWS_SECRET_ACCESS_KEY
AWS_STORAGE_BUCKET_NAME = private.AWS_STORAGE_BUCKET_NAME
RAISE_ON_SIGNAL_ERROR = True
API_THROTTLE = False
# New Relic is only enabled on these specific dev hosts.
NEWRELIC_ENABLED_LIST = ['dev1.addons.phx1.mozilla.com',
                         'dev2.addons.phx1.mozilla.com']
NEWRELIC_ENABLE = HOSTNAME in NEWRELIC_ENABLED_LIST
AES_KEYS = private.AES_KEYS
TASK_USER_ID = 4757633
SERVE_TMP_PATH = False
| bsd-3-clause |
IceCubeDev/SpaceOrNot | psycopg2/tests/test_lobject.py | 39 | 15262 | #!/usr/bin/env python
# test_lobject.py - unit test for large objects support
#
# Copyright (C) 2008-2011 James Henstridge <james@jamesh.id.au>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import os
import shutil
import tempfile
from functools import wraps
import psycopg2
import psycopg2.extensions
from psycopg2.extensions import b
from testutils import unittest, decorate_all_tests, skip_if_tpc_disabled
from testutils import ConnectingTestCase, skip_if_green
def skip_if_no_lo(f):
    """Decorator: skip the wrapped test on servers without large objects."""
    @wraps(f)
    def skip_if_no_lo_(self):
        # Large objects (as used by these tests) require PostgreSQL >= 8.1.
        if self.conn.server_version >= 80100:
            return f(self)
        return self.skipTest("large objects only supported from PG 8.1")
    return skip_if_no_lo_
# Large objects cannot be used on an async (green) connection; decorator to
# skip LO tests in that mode.
skip_lo_if_green = skip_if_green("libpq doesn't support LO in async mode")
class LargeObjectTestCase(ConnectingTestCase):
    """Base class: tracks a temp dir and a large-object oid for cleanup."""
    def setUp(self):
        ConnectingTestCase.setUp(self)
        # Set by tests that create objects/files needing teardown.
        self.lo_oid = None
        self.tmpdir = None
    def tearDown(self):
        if self.tmpdir:
            shutil.rmtree(self.tmpdir, ignore_errors=True)
        if self.conn.closed:
            return
        if self.lo_oid is not None:
            self.conn.rollback()
            try:
                lo = self.conn.lobject(self.lo_oid, "n")
            except psycopg2.OperationalError:
                # The object was already gone; nothing to unlink.
                pass
            else:
                lo.unlink()
        ConnectingTestCase.tearDown(self)
class LargeObjectTests(LargeObjectTestCase):
    """Functional tests for psycopg2's large-object (lo) interface.

    Fixes vs. the previous revision: the deprecated ``assertEquals`` alias is
    replaced by ``assertEqual``, and four unused ``length = lo.write(...)``
    locals are dropped (the value is only needed in test_seek_tell).
    """
    def test_create(self):
        lo = self.conn.lobject()
        self.assertNotEqual(lo, None)
        self.assertEqual(lo.mode[0], "w")
    def test_connection_needed(self):
        self.assertRaises(TypeError,
                          psycopg2.extensions.lobject, [])
    def test_open_non_existent(self):
        # By creating then removing a large object, we get an Oid that
        # should be unused.
        lo = self.conn.lobject()
        lo.unlink()
        self.assertRaises(psycopg2.OperationalError, self.conn.lobject, lo.oid)
    def test_open_existing(self):
        lo = self.conn.lobject()
        lo2 = self.conn.lobject(lo.oid)
        self.assertNotEqual(lo2, None)
        self.assertEqual(lo2.oid, lo.oid)
        self.assertEqual(lo2.mode[0], "r")
    def test_open_for_write(self):
        lo = self.conn.lobject()
        lo2 = self.conn.lobject(lo.oid, "w")
        self.assertEqual(lo2.mode[0], "w")
        lo2.write(b("some data"))
    def test_open_mode_n(self):
        # Openning an object in mode "n" gives us a closed lobject.
        lo = self.conn.lobject()
        lo.close()
        lo2 = self.conn.lobject(lo.oid, "n")
        self.assertEqual(lo2.oid, lo.oid)
        self.assertEqual(lo2.closed, True)
    def test_close_connection_gone(self):
        lo = self.conn.lobject()
        self.conn.close()
        lo.close()
    def test_create_with_oid(self):
        # Create and delete a large object to get an unused Oid.
        lo = self.conn.lobject()
        oid = lo.oid
        lo.unlink()
        lo = self.conn.lobject(0, "w", oid)
        self.assertEqual(lo.oid, oid)
    def test_create_with_existing_oid(self):
        lo = self.conn.lobject()
        lo.close()
        self.assertRaises(psycopg2.OperationalError,
                          self.conn.lobject, 0, "w", lo.oid)
        self.assert_(not self.conn.closed)
    def test_import(self):
        self.tmpdir = tempfile.mkdtemp()
        filename = os.path.join(self.tmpdir, "data.txt")
        fp = open(filename, "wb")
        fp.write(b("some data"))
        fp.close()
        lo = self.conn.lobject(0, "r", 0, filename)
        self.assertEqual(lo.read(), "some data")
    def test_close(self):
        lo = self.conn.lobject()
        self.assertEqual(lo.closed, False)
        lo.close()
        self.assertEqual(lo.closed, True)
    def test_write(self):
        lo = self.conn.lobject()
        self.assertEqual(lo.write(b("some data")), len("some data"))
    def test_write_large(self):
        lo = self.conn.lobject()
        data = "data" * 1000000
        self.assertEqual(lo.write(data), len(data))
    def test_read(self):
        lo = self.conn.lobject()
        lo.write(b("some data"))
        lo.close()
        lo = self.conn.lobject(lo.oid)
        x = lo.read(4)
        self.assertEqual(type(x), type(''))
        self.assertEqual(x, "some")
        self.assertEqual(lo.read(), " data")
    def test_read_binary(self):
        lo = self.conn.lobject()
        lo.write(b("some data"))
        lo.close()
        lo = self.conn.lobject(lo.oid, "rb")
        x = lo.read(4)
        self.assertEqual(type(x), type(b('')))
        self.assertEqual(x, b("some"))
        self.assertEqual(lo.read(), b(" data"))
    def test_read_text(self):
        lo = self.conn.lobject()
        snowman = u"\u2603"
        lo.write(u"some data " + snowman)
        lo.close()
        lo = self.conn.lobject(lo.oid, "rt")
        x = lo.read(4)
        self.assertEqual(type(x), type(u''))
        self.assertEqual(x, u"some")
        self.assertEqual(lo.read(), u" data " + snowman)
    def test_read_large(self):
        lo = self.conn.lobject()
        data = "data" * 1000000
        lo.write("some" + data)
        lo.close()
        lo = self.conn.lobject(lo.oid)
        self.assertEqual(lo.read(4), "some")
        data1 = lo.read()
        # avoid dumping megacraps in the console in case of error
        self.assert_(data == data1,
                     "%r... != %r..." % (data[:100], data1[:100]))
    def test_seek_tell(self):
        lo = self.conn.lobject()
        length = lo.write(b("some data"))
        self.assertEqual(lo.tell(), length)
        lo.close()
        lo = self.conn.lobject(lo.oid)
        self.assertEqual(lo.seek(5, 0), 5)
        self.assertEqual(lo.tell(), 5)
        self.assertEqual(lo.read(), "data")
        # SEEK_CUR: relative current location
        lo.seek(5)
        self.assertEqual(lo.seek(2, 1), 7)
        self.assertEqual(lo.tell(), 7)
        self.assertEqual(lo.read(), "ta")
        # SEEK_END: relative to end of file
        self.assertEqual(lo.seek(-2, 2), length - 2)
        self.assertEqual(lo.read(), "ta")
    def test_unlink(self):
        lo = self.conn.lobject()
        lo.unlink()
        # the object doesn't exist now, so we can't reopen it.
        self.assertRaises(psycopg2.OperationalError, self.conn.lobject, lo.oid)
        # And the object has been closed.
        self.assertEqual(lo.closed, True)
    def test_export(self):
        lo = self.conn.lobject()
        lo.write(b("some data"))
        self.tmpdir = tempfile.mkdtemp()
        filename = os.path.join(self.tmpdir, "data.txt")
        lo.export(filename)
        self.assertTrue(os.path.exists(filename))
        f = open(filename, "rb")
        try:
            self.assertEqual(f.read(), b("some data"))
        finally:
            f.close()
    def test_close_twice(self):
        lo = self.conn.lobject()
        lo.close()
        lo.close()
    def test_write_after_close(self):
        lo = self.conn.lobject()
        lo.close()
        self.assertRaises(psycopg2.InterfaceError, lo.write, b("some data"))
    def test_read_after_close(self):
        lo = self.conn.lobject()
        lo.close()
        self.assertRaises(psycopg2.InterfaceError, lo.read, 5)
    def test_seek_after_close(self):
        lo = self.conn.lobject()
        lo.close()
        self.assertRaises(psycopg2.InterfaceError, lo.seek, 0)
    def test_tell_after_close(self):
        lo = self.conn.lobject()
        lo.close()
        self.assertRaises(psycopg2.InterfaceError, lo.tell)
    def test_unlink_after_close(self):
        lo = self.conn.lobject()
        lo.close()
        # Unlink works on closed files.
        lo.unlink()
    def test_export_after_close(self):
        lo = self.conn.lobject()
        lo.write(b("some data"))
        lo.close()
        self.tmpdir = tempfile.mkdtemp()
        filename = os.path.join(self.tmpdir, "data.txt")
        lo.export(filename)
        self.assertTrue(os.path.exists(filename))
        f = open(filename, "rb")
        try:
            self.assertEqual(f.read(), b("some data"))
        finally:
            f.close()
    def test_close_after_commit(self):
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.commit()
        # Closing outside of the transaction is okay.
        lo.close()
    def test_write_after_commit(self):
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.commit()
        self.assertRaises(psycopg2.ProgrammingError, lo.write, b("some data"))
    def test_read_after_commit(self):
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.commit()
        self.assertRaises(psycopg2.ProgrammingError, lo.read, 5)
    def test_seek_after_commit(self):
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.commit()
        self.assertRaises(psycopg2.ProgrammingError, lo.seek, 0)
    def test_tell_after_commit(self):
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.commit()
        self.assertRaises(psycopg2.ProgrammingError, lo.tell)
    def test_unlink_after_commit(self):
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.commit()
        # Unlink of stale lobject is okay
        lo.unlink()
    def test_export_after_commit(self):
        lo = self.conn.lobject()
        lo.write(b("some data"))
        self.conn.commit()
        self.tmpdir = tempfile.mkdtemp()
        filename = os.path.join(self.tmpdir, "data.txt")
        lo.export(filename)
        self.assertTrue(os.path.exists(filename))
        f = open(filename, "rb")
        try:
            self.assertEqual(f.read(), b("some data"))
        finally:
            f.close()
    @skip_if_tpc_disabled
    def test_read_after_tpc_commit(self):
        self.conn.tpc_begin('test_lobject')
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.tpc_commit()
        self.assertRaises(psycopg2.ProgrammingError, lo.read, 5)
    @skip_if_tpc_disabled
    def test_read_after_tpc_prepare(self):
        self.conn.tpc_begin('test_lobject')
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.tpc_prepare()
        try:
            self.assertRaises(psycopg2.ProgrammingError, lo.read, 5)
        finally:
            self.conn.tpc_commit()
    def test_large_oid(self):
        # Test we don't overflow with an oid not fitting a signed int
        try:
            self.conn.lobject(0xFFFFFFFE)
        except psycopg2.OperationalError:
            pass
decorate_all_tests(LargeObjectTests, skip_if_no_lo, skip_lo_if_green)
def skip_if_no_truncate(f):
    """Decorator: run *f* only when both server and libpq support truncate."""
    @wraps(f)
    def wrapper(self):
        # Server-side support first (PostgreSQL >= 8.3)...
        if self.conn.server_version < 80300:
            return self.skipTest(
                "the server doesn't support large object truncate")
        # ...then client-side: the build must expose lobject.truncate.
        if not hasattr(psycopg2.extensions.lobject, 'truncate'):
            return self.skipTest(
                "psycopg2 has been built against a libpq "
                "without large object truncate support.")
        return f(self)
    return wrapper
class LargeObjectTruncateTests(LargeObjectTestCase):
    """Tests for lobject.truncate() semantics."""
    # NOTE(review): these tests write/compare native str rather than b(...)
    # as the sibling LargeObjectTests does — presumably fine on Python 2;
    # confirm if porting.
    def test_truncate(self):
        lo = self.conn.lobject()
        lo.write("some data")
        lo.close()
        lo = self.conn.lobject(lo.oid, "w")
        lo.truncate(4)
        # seek position unchanged
        self.assertEqual(lo.tell(), 0)
        # data truncated
        self.assertEqual(lo.read(), "some")
        lo.truncate(6)
        lo.seek(0)
        # large object extended with zeroes
        self.assertEqual(lo.read(), "some\x00\x00")
        lo.truncate()
        lo.seek(0)
        # large object empty
        self.assertEqual(lo.read(), "")
    def test_truncate_after_close(self):
        lo = self.conn.lobject()
        lo.close()
        self.assertRaises(psycopg2.InterfaceError, lo.truncate)
    def test_truncate_after_commit(self):
        lo = self.conn.lobject()
        self.lo_oid = lo.oid
        self.conn.commit()
        self.assertRaises(psycopg2.ProgrammingError, lo.truncate)
decorate_all_tests(LargeObjectTruncateTests,
    skip_if_no_lo, skip_lo_if_green, skip_if_no_truncate)
def _has_lo64(conn):
    """Return (bool, msg) about the lo64 support"""
    if conn.server_version < 90300:
        reason = ("server version %s doesn't support the lo64 API"
                  % conn.server_version)
        return (False, reason)

    if 'lo64' not in psycopg2.__version__:
        return (False, "this psycopg build doesn't support the lo64 API")

    return (True, "this server and build support the lo64 API")
def skip_if_no_lo64(f):
    """Decorator: skip test *f* unless the lo64 API is available."""
    @wraps(f)
    def skip_if_no_lo64_(self):
        supported, msg = _has_lo64(self.conn)
        if supported:
            return f(self)
        return self.skipTest(msg)
    return skip_if_no_lo64_
class LargeObject64Tests(LargeObjectTestCase):
    """Checks seek/tell/truncate beyond 2GB when the lo64 API is present."""

    def test_seek_tell_truncate_greater_than_2gb(self):
        lo = self.conn.lobject()

        length = (1 << 31) + (1 << 30)  # 2gb + 1gb = 3gb
        lo.truncate(length)

        self.assertEqual(lo.seek(length, 0), length)
        self.assertEqual(lo.tell(), length)
decorate_all_tests(LargeObject64Tests,
skip_if_no_lo, skip_lo_if_green, skip_if_no_truncate, skip_if_no_lo64)
def skip_if_lo64(f):
    """Decorator: skip test *f* when the lo64 API *is* available
    (for tests exercising the non-lo64 code paths)."""
    @wraps(f)
    def skip_if_lo64_(self):
        supported, msg = _has_lo64(self.conn)
        if supported:
            return self.skipTest(msg)
        return f(self)
    return skip_if_lo64_
class LargeObjectNot64Tests(LargeObjectTestCase):
    """Without the lo64 API, offsets beyond 2GB must fail cleanly
    instead of silently overflowing."""

    def test_seek_larger_than_2gb(self):
        lo = self.conn.lobject()
        offset = 1 << 32  # 4gb
        # The exact exception depends on where the overflow is caught
        # (Python wrapper, libpq, or the server).
        self.assertRaises(
            (OverflowError, psycopg2.InterfaceError, psycopg2.NotSupportedError),
            lo.seek, offset, 0)

    def test_truncate_larger_than_2gb(self):
        lo = self.conn.lobject()
        length = 1 << 32  # 4gb
        self.assertRaises(
            (OverflowError, psycopg2.InterfaceError, psycopg2.NotSupportedError),
            lo.truncate, length)
decorate_all_tests(LargeObjectNot64Tests,
skip_if_no_lo, skip_lo_if_green, skip_if_no_truncate, skip_if_lo64)
def test_suite():
    """Return every test in this module, for the project test runner."""
    return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
koniiiik/django | tests/sites_framework/migrations/0001_initial.py | 281 | 1672 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the sites_framework tests: three article models
    related to ``sites.Site`` by FK (custom field name), FK, and M2M."""

    # 'sites.Site' must exist before these relation fields are created.
    dependencies = [
        ('sites', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomArticle',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=50)),
                ('places_this_article_should_appear', models.ForeignKey('sites.Site', models.CASCADE)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ExclusiveArticle',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=50)),
                ('site', models.ForeignKey('sites.Site', models.CASCADE)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='SyndicatedArticle',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=50)),
                ('sites', models.ManyToManyField('sites.Site')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause |
credativ/pulp | common/test/unit/common/plugins/test_progress.py | 4 | 13316 | # -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
The tests in this module test the pulp.common.progress module.
"""
from datetime import datetime
import unittest
import mock
from pulp.plugins.model import PublishReport
from pulp.plugins.conduits.repo_sync import RepoSyncConduit
from pulp.common.dateutils import format_iso8601_datetime
from pulp.common.plugins import progress
class TestProgressReport(unittest.TestCase):
"""
Test the ProgressReport class.
"""
def setUp(self):
self.conduit = get_mock_conduit()
def test___init___with_defaults(self):
"""
Test the __init__ method with all default parameters.
"""
report = progress.ProgressReport()
# Make sure all the appropriate attributes were set
self.assertEqual(report.conduit, None)
self.assertEqual(report._state, progress.ProgressReport.STATE_NOT_STARTED)
# The state_times attribute should be a dictionary with only the time the not started state was
# entered
self.assertTrue(isinstance(report.state_times, dict))
self.assertEqual(len(report.state_times), 1)
self.assertTrue(isinstance(report.state_times[progress.ProgressReport.STATE_NOT_STARTED],
datetime))
self.assertEqual(report.error_message, None)
self.assertEqual(report.traceback, None)
def test___init__with_non_defaults(self):
"""
Test the __init__ method when passing in parameters.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime.utcnow()}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
# Make sure all the appropriate attributes were set
self.assertEqual(report.conduit, self.conduit)
self.assertEqual(report._state, state)
self.assertEqual(report.state_times, state_times)
self.assertEqual(report.error_message, error_message)
self.assertEqual(report.traceback, traceback)
def test_build_final_report_failure(self):
"""
Test build_final_report() when there is a failure.
"""
report = progress.ProgressReport(self.conduit, state=progress.ProgressReport.STATE_FAILED)
conduit_report = report.build_final_report()
# The success report call should not have been made
self.assertEqual(self.conduit.build_success_report.call_count, 0)
# We should have called the failure report once with the serialized progress report as the summary
self.conduit.build_failure_report.assert_called_once_with(report.build_progress_report(), None)
# Inspect the conduit report
self.assertEqual(conduit_report.success_flag, False)
self.assertEqual(conduit_report.canceled_flag, False)
self.assertEqual(conduit_report.summary, report.build_progress_report())
self.assertEqual(conduit_report.details, None)
def test_build_final_report_success(self):
"""
Test build_final_report() when there is success.
"""
report = progress.ProgressReport(self.conduit, state=progress.ProgressReport.STATE_COMPLETE)
conduit_report = report.build_final_report()
# The failure report call should not have been made
self.assertEqual(self.conduit.build_failure_report.call_count, 0)
# We should have called the success report once with the serialized progress report as the summary
self.conduit.build_success_report.assert_called_once_with(report.build_progress_report(), None)
# Inspect the conduit report
self.assertEqual(conduit_report.success_flag, True)
self.assertEqual(conduit_report.canceled_flag, False)
self.assertEqual(conduit_report.summary, report.build_progress_report())
self.assertEqual(conduit_report.details, None)
def test_build_final_report_cancelled(self):
"""
Test build_final_report() when the state is cancelled. Since the user asked for it to be
cancelled, we should report it as a success
"""
report = progress.ProgressReport(self.conduit,
state=progress.ProgressReport.STATE_CANCELED)
conduit_report = report.build_final_report()
# The failure report call should not have been made
self.assertEqual(self.conduit.build_failure_report.call_count, 0)
# We should have called the success report once with the serialized progress report as the
# summary
self.conduit.build_success_report.assert_called_once_with(report.build_progress_report(),
None)
# Inspect the conduit report
self.assertEqual(conduit_report.success_flag, True)
self.assertEqual(conduit_report.canceled_flag, False)
self.assertEqual(conduit_report.summary, report.build_progress_report())
self.assertEqual(conduit_report.details, None)
def test_build_progress_report(self):
"""
Test the build_progress_report() method.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime.utcnow()}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
report = report.build_progress_report()
# Make sure all the appropriate attributes were set
self.assertEqual(report['state'], state)
expected_state_times = {}
for key, value in state_times.items():
expected_state_times[key] = format_iso8601_datetime(value)
self.assertTrue(report['state_times'], expected_state_times)
self.assertEqual(report['error_message'], error_message)
self.assertEqual(report['traceback'], traceback)
def test_from_progress_report(self):
"""
Test that building an ProgressReport from the output of build_progress_report() makes an equivalent
ProgressReport.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime(2013, 5, 3, 20, 11, 3)}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
original_report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
serial_report = original_report.build_progress_report()
report = progress.ProgressReport.from_progress_report(serial_report)
# All of the values that we had set in the initial report should be identical on this one, except that
# the conduit should be None
self.assertEqual(report.conduit, None)
self.assertEqual(report._state, original_report.state)
self.assertEqual(report.state_times, original_report.state_times)
self.assertEqual(report.error_message, original_report.error_message)
self.assertEqual(report.traceback, original_report.traceback)
def test_update_progress(self):
"""
The update_progress() method should send the progress report to the conduit.
"""
state = progress.ProgressReport.STATE_FAILED
state_times = {progress.ProgressReport.STATE_FAILED: datetime.utcnow()}
error_message = 'This is an error message.'
traceback = 'This is a traceback.'
report = progress.ProgressReport(
self.conduit, state=state, state_times=state_times,
error_message=error_message, traceback=traceback)
report.update_progress()
# Make sure the conduit's set_progress() method was called
self.conduit.set_progress.assert_called_once_with(report.build_progress_report())
def test__get_state(self):
"""
Test our state property as a getter.
"""
report = progress.ProgressReport(None, state=progress.ProgressReport.STATE_COMPLETE)
self.assertEqual(report.state, progress.ProgressReport.STATE_COMPLETE)
# Normally, the ProgressReport doesn't have ALLOWED_STATE_TRANSITIONS, so let's give it one for this
# test
@mock.patch('pulp.common.plugins.progress.ProgressReport.ALLOWED_STATE_TRANSITIONS',
{'state_1': ['state_2']}, create=True)
def test__set_state_allowed_transition(self):
"""
Test the state property as a setter for an allowed state transition.
"""
report = progress.ProgressReport(self.conduit, state='state_1')
# This is an allowed transition, so it should not raise an error
report.state = 'state_2'
self.assertEqual(report._state, 'state_2')
self.assertTrue(report._state in report.state_times)
self.assertTrue(isinstance(report.state_times[report._state], datetime))
self.conduit.set_progress.assert_called_once_with(report.build_progress_report())
# Normally, the ProgressReport doesn't have ALLOWED_STATE_TRANSITIONS, so let's give it one for this
# test
@mock.patch('pulp.common.plugins.progress.ProgressReport.ALLOWED_STATE_TRANSITIONS',
{'state_1': ['state_2']}, create=True)
def test__set_state_disallowed_transition(self):
"""
Test the state property as a setter for a disallowed state transition.
"""
report = progress.ProgressReport(None, state='state_1')
# We can't go from state_1 to anything other than state_2
try:
report.state = 'state_3'
self.fail('The line above this should have raised an Exception, but it did not.')
except ValueError, e:
expected_error_substring = '%s --> %s' % (report.state, 'state_3')
self.assertTrue(expected_error_substring in str(e))
# The state should remain the same
self.assertEqual(report.state, 'state_1')
self.assertTrue('state_3' not in report.state_times)
# Normally, the ProgressReport doesn't have ALLOWED_STATE_TRANSITIONS, so let's give it one for this
# test
@mock.patch('pulp.common.plugins.progress.ProgressReport.ALLOWED_STATE_TRANSITIONS',
{'state_1': ['state_2']}, create=True)
def test__set_state_same_state(self):
"""
Test setting a state to the same state. This is weird, but allowed.
"""
report = progress.ProgressReport(None, state='state_1')
# This should not raise an Exception
report.state = 'state_1'
self.assertEqual(report.state, 'state_1')
def get_mock_conduit(type_id=None, existing_units=None, pkg_dir=None):
    """
    Build a mock RepoSyncConduit for the tests above.

    The mock's build_success_report/build_failure_report produce real
    PublishReport objects; save_unit and set_progress are plain mocks so
    calls can be inspected.

    :param type_id:        unused; kept for signature compatibility with
                           other test modules sharing this helper
    :param existing_units: unused; see above
    :param pkg_dir:        unused; see above
    :return: a mock.Mock spec'd on RepoSyncConduit
    """
    # Removed: a large no-op triple-quoted string holding commented-out
    # helper implementations (init_unit/get_units/search_all_units) and
    # the commented-out assignments wiring them up -- all dead code.
    def build_failure_report(summary, details):
        return PublishReport(False, summary, details)

    def build_success_report(summary, details):
        return PublishReport(True, summary, details)

    sync_conduit = mock.Mock(spec=RepoSyncConduit)
    sync_conduit.save_unit = mock.Mock()
    sync_conduit.build_failure_report = mock.MagicMock(side_effect=build_failure_report)
    sync_conduit.build_success_report = mock.MagicMock(side_effect=build_success_report)
    sync_conduit.set_progress = mock.MagicMock()

    return sync_conduit
| gpl-2.0 |
abadger/ansible | test/support/windows-integration/plugins/modules/win_file.py | 52 | 2184 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
- Unlike M(file), does not modify ownership, permissions or manipulate links.
- For non-Windows targets, use the M(file) module instead.
options:
path:
description:
- Path to the file being managed.
required: yes
type: path
aliases: [ dest, name ]
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
- If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior.
- If C(absent), directories will be recursively deleted, and files will be removed.
- If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
choices: [ absent, directory, file, touch ]
seealso:
- module: file
- module: win_acl
- module: win_acl_inheritance
- module: win_owner
- module: win_stat
author:
- Jon Hawkesworth (@jhawkesworth)
'''
EXAMPLES = r'''
- name: Touch a file (creates if not present, updates modification time if present)
win_file:
path: C:\Temp\foo.conf
state: touch
- name: Remove a file, if present
win_file:
path: C:\Temp\foo.conf
state: absent
- name: Create directory structure
win_file:
path: C:\Temp\folder\subfolder
state: directory
- name: Remove directory structure
win_file:
path: C:\Temp
state: absent
'''
| gpl-3.0 |
azumimuo/family-xbmc-addon | script.mrknow.urlresolver/lib/urlresolver9/plugins/mp4engine.py | 4 | 1256 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class Mp4EngineResolver(UrlResolver):
    """URL resolver plugin for mp4engine.com embed links."""
    name = "mp4engine"
    domains = ["mp4engine.com"]
    # Group 1: host; group 2: media id, from plain or "embed-" URLs,
    # optionally suffixed with a -WxH.html dimensions part.
    pattern = '(?://|\.)(mp4engine\.com)/(?:embed-)?([0-9a-zA-Z]+)(?:-[0-9]x[0-9].html)?'

    def get_media_url(self, host, media_id):
        # helpers.get_media_url presumably resolves the embed page to the
        # stream URL -- confirm in lib.helpers. Spaces are percent-encoded
        # so the player accepts the link.
        return helpers.get_media_url(self.get_url(host, media_id)).replace(" ", "%20")

    def get_url(self, host, media_id):
        # Delegate to the base class's template-based URL builder.
        return self._default_get_url(host, media_id)
| gpl-2.0 |
torchingloom/edx-platform | common/djangoapps/terrain/stubs/start.py | 7 | 2687 | """
Command-line utility to start a stub service.
"""
import sys
import time
import logging
from .comments import StubCommentsService
from .xqueue import StubXQueueService
from .youtube import StubYouTubeService
from .ora import StubOraService
from .lti import StubLtiService
USAGE = "USAGE: python -m stubs.start SERVICE_NAME PORT_NUM [CONFIG_KEY=CONFIG_VAL, ...]"
SERVICES = {
'xqueue': StubXQueueService,
'youtube': StubYouTubeService,
'ora': StubOraService,
'comments': StubCommentsService,
'lti': StubLtiService,
}
# Log to stdout, including debug messages
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s")
def get_args():
    """
    Parse arguments, returning tuple of `(service_name, port_num, config_dict)`.
    Exits with a message if arguments are invalid.
    """
    if len(sys.argv) < 3:
        print USAGE
        sys.exit(1)

    service_name = sys.argv[1]
    port_num = sys.argv[2]
    # Any remaining args are KEY=VAL pairs for the stub's configuration.
    config_dict = _parse_config_args(sys.argv[3:])

    if service_name not in SERVICES:
        print "Unrecognized service '{0}'.  Valid choices are: {1}".format(
            service_name, ", ".join(SERVICES.keys()))
        sys.exit(1)

    try:
        port_num = int(port_num)
        if port_num < 0:
            # Re-raised below so negative ports share the error message.
            raise ValueError
    except ValueError:
        print "Port '{0}' must be a positive integer".format(port_num)
        sys.exit(1)

    return service_name, port_num, config_dict
def _parse_config_args(args):
"""
Parse stub configuration arguments, which are strings of the form "KEY=VAL".
`args` is a list of arguments from the command line.
Any argument that does not match the "KEY=VAL" format will be logged and skipped.
Returns a dictionary with the configuration keys and values.
"""
config_dict = dict()
for config_str in args:
try:
components = config_str.split('=')
if len(components) >= 2:
config_dict[components[0]] = "=".join(components[1:])
except:
print "Warning: could not interpret config value '{0}'".format(config_str)
pass
return config_dict
def main():
    """
    Start a server; shut down on keyboard interrupt signal.
    """
    service_name, port_num, config_dict = get_args()
    print "Starting stub service '{0}' on port {1}...".format(service_name, port_num)

    server = SERVICES[service_name](port_num=port_num)
    # Apply CLI KEY=VAL overrides on top of the service's defaults.
    server.config.update(config_dict)

    try:
        # Idle loop; presumably the stub serves from background thread(s),
        # so the main thread just waits for Ctrl-C.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print "Stopping stub service..."
    finally:
        server.shutdown()
| agpl-3.0 |
IronLanguages/ironpython3 | Src/StdLib/Lib/json/decoder.py | 89 | 12763 | """Implementation of JSONDecoder
"""
import re
from json import scanner
try:
from _json import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
NaN = float('nan')
PosInf = float('inf')
NegInf = float('-inf')
def linecol(doc, pos):
    """Map *pos* (a 0-based index into *doc*) to 1-based (line, column).

    Works on both str and bytes documents.
    """
    newline = b'\n' if isinstance(doc, bytes) else '\n'
    lineno = doc.count(newline, 0, pos) + 1
    if lineno == 1:
        return lineno, pos + 1
    return lineno, pos - doc.rindex(newline, 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Format *msg* with line/column context for position(s) in *doc*."""
    # Note that this function is called from _json
    # (commented-out legacy %-formatting variants removed as dead code)
    lineno, colno = linecol(doc, pos)
    if end is None:
        fmt = '{0}: line {1} column {2} (char {3})'
        return fmt.format(msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
    return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': '"', '\\': '\\', '/': '/',
'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
}
def _decode_uXXXX(s, pos):
    """Decode the four hex digits of a \\uXXXX escape at s[pos+1:pos+5]."""
    hex_digits = s[pos + 1:pos + 5]
    # Reject short sequences and int()-accepted forms like '0x12'.
    if len(hex_digits) == 4 and hex_digits[1] not in 'xX':
        try:
            return int(hex_digits, 16)
        except ValueError:
            pass
    raise ValueError(errmsg("Invalid \\uXXXX escape", s, pos))
def py_scanstring(s, end, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character {0!r} at".format(terminator)
                raise ValueError(errmsg(msg, s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: {0!r}".format(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            uni = _decode_uXXXX(s, end)
            end += 5
            # Surrogate pair: a high surrogate immediately followed by a
            # low-surrogate escape combines into one non-BMP code point.
            if 0xd800 <= uni <= 0xdbff and s[end:end + 2] == '\\u':
                uni2 = _decode_uXXXX(s, end + 1)
                if 0xdc00 <= uni2 <= 0xdfff:
                    uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                    end += 6
            char = chr(uni)
        _append(char)
    return ''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(s_and_end, strict, scan_once, object_hook, object_pairs_hook,
               memo=None, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object starting just after its '{'.

    Returns (obj, end) where end indexes the character after the '}'.
    """
    s, end = s_and_end
    pairs = []
    pairs_append = pairs.append
    # Backwards compatibility
    if memo is None:
        memo = {}
    # Interning repeated keys via setdefault saves memory on large docs.
    memo_get = memo.setdefault
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg(
                "Expecting property name enclosed in double quotes", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, strict)
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting ':' delimiter", s, end))
        end += 1

        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration as err:
            raise ValueError(errmsg("Expecting value", s, err.value)) from None
        pairs_append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting ',' delimiter", s, end - 1))
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg(
                "Expecting property name enclosed in double quotes", s, end - 1))
    # object_pairs_hook takes precedence over object_hook.
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array starting just after its '['.

    Returns (values, end) where end indexes the character after the ']'.
    """
    s, end = s_and_end
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration as err:
            raise ValueError(errmsg("Expecting value", s, err.value)) from None
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting ',' delimiter", s, end - 1))
        # Skip whitespace after the comma before the next value.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | str               |
    +---------------+-------------------+
    | number (int)  | int               |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.

    """

    def __init__(self, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``object_pairs_hook``, if specified will be called with the result of
        every JSON object decoded with an ordered list of pairs.  The return
        value of ``object_pairs_hook`` will be used instead of the ``dict``.
        This feature can be used to implement custom decoders that rely on the
        order that the key and value pairs are decoded (for example,
        collections.OrderedDict will remember the order of insertion). If
        ``object_hook`` is also defined, the ``object_pairs_hook`` takes
        priority.

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded. By default this is equivalent to
        float(num_str). This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded. By default this is equivalent to
        int(num_str). This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.

        If ``strict`` is false (true is the default), then control
        characters will be allowed inside strings.  Control characters in
        this context are those with character codes in the 0-31 range,
        including ``'\\t'`` (tab), ``'\\n'``, ``'\\r'`` and ``'\\0'``.
        """
        self.object_hook = object_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.object_pairs_hook = object_pairs_hook
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared key-interning cache, passed to JSONObject by the scanner.
        self.memo = {}
        self.scan_once = scanner.make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` instance
        containing a JSON document).

        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Reject trailing non-whitespace after the document.
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` beginning with
        a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration as err:
            raise ValueError(errmsg("Expecting value", s, err.value)) from None
        return obj, end
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.