repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
agualis/test-django-nonrel
django/core/management/commands/startproject.py
322
1680
from django.core.management.base import copy_helper, CommandError, LabelCommand from django.utils.importlib import import_module import os import re from random import choice class Command(LabelCommand): help = "Creates a Django project directory structure for the given project name in the current directory." args = "[projectname]" label = 'project name' requires_model_validation = False # Can't import settings during this command, because they haven't # necessarily been created. can_import_settings = False def handle_label(self, project_name, **options): # Determine the project_name a bit naively -- by looking at the name of # the parent directory. directory = os.getcwd() # Check that the project_name cannot be imported. try: import_module(project_name) except ImportError: pass else: raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % project_name) copy_helper(self.style, 'project', project_name, directory) # Create a random SECRET_KEY hash, and put it in the main settings. main_settings_file = os.path.join(directory, project_name, 'settings.py') settings_contents = open(main_settings_file, 'r').read() fp = open(main_settings_file, 'w') secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)]) settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents) fp.write(settings_contents) fp.close()
bsd-3-clause
WalkingMachine/sara_behaviors
sara_flexbe_behaviors/src/sara_flexbe_behaviors/action_receive_bag_sm.py
1
3679
#!/usr/bin/env python # -*- coding: utf-8 -*- ########################################################### # WARNING: Generated code! # # ************************** # # Manual changes may get lost if file is generated again. # # Only code inside the [MANUAL] tags will be kept. # ########################################################### from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger from sara_flexbe_states.run_trajectory import RunTrajectory from sara_flexbe_states.set_gripper_state import SetGripperState from sara_flexbe_states.sara_say import SaraSay from sara_flexbe_states.torque_reader import ReadTorque # Additional imports can be added inside the following tags # [MANUAL_IMPORT] # [/MANUAL_IMPORT] ''' Created on Thu Jul 27 2017 @author: Redouane Laref ''' class Action_Receive_BagSM(Behavior): ''' Action for receiving the bag for help me carry scenario. ''' def __init__(self): super(Action_Receive_BagSM, self).__init__() self.name = 'Action_Receive_Bag' # parameters of this behavior # references to used behaviors # Additional initialization code can be added inside the following tags # [MANUAL_INIT] # [/MANUAL_INIT] # Behavior comments: # O 128 372 # Prend le sac et le rapporte dans son idle pose def create(self): # x:867 y:64, x:469 y:60 _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['Closed_Gripper_Width', 'Open_Gripper_Width', 'Closed_Gripper_Width']) _state_machine.userdata.Closed_Gripper_Width = 1 _state_machine.userdata.Open_Gripper_Width = 255 _state_machine.userdata.effort = 50 # Additional creation code can be added inside the following tags # [MANUAL_CREATE] # [/MANUAL_CREATE] with _state_machine: # x:101 y:292 OperatableStateMachine.add('place arm', RunTrajectory(file="receive_bag", duration=6), transitions={'done': 'opengripper'}, autonomy={'done': Autonomy.Off}) # x:468 y:286 OperatableStateMachine.add('close_gripper', SetGripperState(width=0, 
effort=1), transitions={'object': 'thank you', 'no_object': 'thank you'}, autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off}, remapping={'object_size': 'object_size'}) # x:638 y:216 OperatableStateMachine.add('thank you', SaraSay(sentence="Thank you", input_keys=[], emotion=1, block=True), transitions={'done': 'place back arm'}, autonomy={'done': Autonomy.Off}) # x:653 y:81 OperatableStateMachine.add('place back arm', RunTrajectory(file="sac_transport", duration=0), transitions={'done': 'finished'}, autonomy={'done': Autonomy.Off}) # x:263 y:293 OperatableStateMachine.add('Torque_Reader', ReadTorque(watchdog=5, Joint="right_elbow_pitch_joint", Threshold=0.5, min_time=1), transitions={'threshold': 'close_gripper', 'watchdog': 'close_gripper', 'fail': 'failed'}, autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off}, remapping={'torque': 'torque'}) # x:196 y:148 OperatableStateMachine.add('opengripper', SetGripperState(width=0.25, effort=1), transitions={'object': 'Torque_Reader', 'no_object': 'Torque_Reader'}, autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off}, remapping={'object_size': 'object_size'}) return _state_machine # Private functions can be added inside the following tags # [MANUAL_FUNC] # [/MANUAL_FUNC]
bsd-3-clause
hwu25/AppPkg
Applications/Python/Python-2.7.2/Lib/test/test_bsddb.py
10
12031
#! /usr/bin/env python """Test script for the bsddb C module by Roger E. Masse Adapted to unittest format and expanded scope by Raymond Hettinger """ import os, sys import unittest from test import test_support # Skip test if _bsddb wasn't built. test_support.import_module('_bsddb') bsddb = test_support.import_module('bsddb', deprecated=True) # Just so we know it's imported: test_support.import_module('dbhash', deprecated=True) class TestBSDDB(unittest.TestCase): openflag = 'c' def setUp(self): self.f = self.openmethod[0](self.fname, self.openflag, cachesize=32768) self.d = dict(q='Guido', w='van', e='Rossum', r='invented', t='Python', y='') for k, v in self.d.iteritems(): self.f[k] = v def tearDown(self): self.f.sync() self.f.close() if self.fname is None: return try: os.remove(self.fname) except os.error: pass def test_getitem(self): for k, v in self.d.iteritems(): self.assertEqual(self.f[k], v) def test_len(self): self.assertEqual(len(self.f), len(self.d)) def test_change(self): self.f['r'] = 'discovered' self.assertEqual(self.f['r'], 'discovered') self.assertIn('r', self.f.keys()) self.assertIn('discovered', self.f.values()) def test_close_and_reopen(self): if self.fname is None: # if we're using an in-memory only db, we can't reopen it # so finish here. 
return self.f.close() self.f = self.openmethod[0](self.fname, 'w') for k, v in self.d.iteritems(): self.assertEqual(self.f[k], v) def assertSetEquals(self, seqn1, seqn2): self.assertEqual(set(seqn1), set(seqn2)) def test_mapping_iteration_methods(self): f = self.f d = self.d self.assertSetEquals(d, f) self.assertSetEquals(d.keys(), f.keys()) self.assertSetEquals(d.values(), f.values()) self.assertSetEquals(d.items(), f.items()) self.assertSetEquals(d.iterkeys(), f.iterkeys()) self.assertSetEquals(d.itervalues(), f.itervalues()) self.assertSetEquals(d.iteritems(), f.iteritems()) def test_iter_while_modifying_values(self): di = iter(self.d) while 1: try: key = di.next() self.d[key] = 'modified '+key except StopIteration: break # it should behave the same as a dict. modifying values # of existing keys should not break iteration. (adding # or removing keys should) loops_left = len(self.f) fi = iter(self.f) while 1: try: key = fi.next() self.f[key] = 'modified '+key loops_left -= 1 except StopIteration: break self.assertEqual(loops_left, 0) self.test_mapping_iteration_methods() def test_iter_abort_on_changed_size(self): def DictIterAbort(): di = iter(self.d) while 1: try: di.next() self.d['newkey'] = 'SPAM' except StopIteration: break self.assertRaises(RuntimeError, DictIterAbort) def DbIterAbort(): fi = iter(self.f) while 1: try: fi.next() self.f['newkey'] = 'SPAM' except StopIteration: break self.assertRaises(RuntimeError, DbIterAbort) def test_iteritems_abort_on_changed_size(self): def DictIteritemsAbort(): di = self.d.iteritems() while 1: try: di.next() self.d['newkey'] = 'SPAM' except StopIteration: break self.assertRaises(RuntimeError, DictIteritemsAbort) def DbIteritemsAbort(): fi = self.f.iteritems() while 1: try: key, value = fi.next() del self.f[key] except StopIteration: break self.assertRaises(RuntimeError, DbIteritemsAbort) def test_iteritems_while_modifying_values(self): di = self.d.iteritems() while 1: try: k, v = di.next() self.d[k] = 'modified '+v 
except StopIteration: break # it should behave the same as a dict. modifying values # of existing keys should not break iteration. (adding # or removing keys should) loops_left = len(self.f) fi = self.f.iteritems() while 1: try: k, v = fi.next() self.f[k] = 'modified '+v loops_left -= 1 except StopIteration: break self.assertEqual(loops_left, 0) self.test_mapping_iteration_methods() def test_first_next_looping(self): items = [self.f.first()] for i in xrange(1, len(self.f)): items.append(self.f.next()) self.assertSetEquals(items, self.d.items()) def test_previous_last_looping(self): items = [self.f.last()] for i in xrange(1, len(self.f)): items.append(self.f.previous()) self.assertSetEquals(items, self.d.items()) def test_first_while_deleting(self): # Test for bug 1725856 self.assertTrue(len(self.d) >= 2, "test requires >=2 items") for _ in self.d: key = self.f.first()[0] del self.f[key] self.assertEqual([], self.f.items(), "expected empty db after test") def test_last_while_deleting(self): # Test for bug 1725856's evil twin self.assertTrue(len(self.d) >= 2, "test requires >=2 items") for _ in self.d: key = self.f.last()[0] del self.f[key] self.assertEqual([], self.f.items(), "expected empty db after test") def test_set_location(self): self.assertEqual(self.f.set_location('e'), ('e', self.d['e'])) def test_contains(self): for k in self.d: self.assertIn(k, self.f) self.assertNotIn('not here', self.f) def test_has_key(self): for k in self.d: self.assertTrue(self.f.has_key(k)) self.assertTrue(not self.f.has_key('not here')) def test_clear(self): self.f.clear() self.assertEqual(len(self.f), 0) def test__no_deadlock_first(self, debug=0): # do this so that testers can see what function we're in in # verbose mode when we deadlock. sys.stdout.flush() # in pybsddb's _DBWithCursor this causes an internal DBCursor # object is created. Other test_ methods in this class could # inadvertently cause the deadlock but an explicit test is needed. 
if debug: print "A" k,v = self.f.first() if debug: print "B", k self.f[k] = "deadlock. do not pass go. do not collect $200." if debug: print "C" # if the bsddb implementation leaves the DBCursor open during # the database write and locking+threading support is enabled # the cursor's read lock will deadlock the write lock request.. # test the iterator interface if True: if debug: print "D" i = self.f.iteritems() k,v = i.next() if debug: print "E" self.f[k] = "please don't deadlock" if debug: print "F" while 1: try: k,v = i.next() except StopIteration: break if debug: print "F2" i = iter(self.f) if debug: print "G" while i: try: if debug: print "H" k = i.next() if debug: print "I" self.f[k] = "deadlocks-r-us" if debug: print "J" except StopIteration: i = None if debug: print "K" # test the legacy cursor interface mixed with writes self.assertIn(self.f.first()[0], self.d) k = self.f.next()[0] self.assertIn(k, self.d) self.f[k] = "be gone with ye deadlocks" self.assertTrue(self.f[k], "be gone with ye deadlocks") def test_for_cursor_memleak(self): # do the bsddb._DBWithCursor iterator internals leak cursors? 
nc1 = len(self.f._cursor_refs) # create iterator i = self.f.iteritems() nc2 = len(self.f._cursor_refs) # use the iterator (should run to the first yield, creating the cursor) k, v = i.next() nc3 = len(self.f._cursor_refs) # destroy the iterator; this should cause the weakref callback # to remove the cursor object from self.f._cursor_refs del i nc4 = len(self.f._cursor_refs) self.assertEqual(nc1, nc2) self.assertEqual(nc1, nc4) self.assertTrue(nc3 == nc1+1) def test_popitem(self): k, v = self.f.popitem() self.assertIn(k, self.d) self.assertIn(v, self.d.values()) self.assertNotIn(k, self.f) self.assertEqual(len(self.d)-1, len(self.f)) def test_pop(self): k = 'w' v = self.f.pop(k) self.assertEqual(v, self.d[k]) self.assertNotIn(k, self.f) self.assertNotIn(v, self.f.values()) self.assertEqual(len(self.d)-1, len(self.f)) def test_get(self): self.assertEqual(self.f.get('NotHere'), None) self.assertEqual(self.f.get('NotHere', 'Default'), 'Default') self.assertEqual(self.f.get('q', 'Default'), self.d['q']) def test_setdefault(self): self.assertEqual(self.f.setdefault('new', 'dog'), 'dog') self.assertEqual(self.f.setdefault('r', 'cat'), self.d['r']) def test_update(self): new = dict(y='life', u='of', i='brian') self.f.update(new) self.d.update(new) for k, v in self.d.iteritems(): self.assertEqual(self.f[k], v) def test_keyordering(self): if self.openmethod[0] is not bsddb.btopen: return keys = self.d.keys() keys.sort() self.assertEqual(self.f.first()[0], keys[0]) self.assertEqual(self.f.next()[0], keys[1]) self.assertEqual(self.f.last()[0], keys[-1]) self.assertEqual(self.f.previous()[0], keys[-2]) self.assertEqual(list(self.f), keys) class TestBTree(TestBSDDB): fname = test_support.TESTFN openmethod = [bsddb.btopen] class TestBTree_InMemory(TestBSDDB): fname = None openmethod = [bsddb.btopen] class TestBTree_InMemory_Truncate(TestBSDDB): fname = None openflag = 'n' openmethod = [bsddb.btopen] class TestHashTable(TestBSDDB): fname = test_support.TESTFN openmethod = 
[bsddb.hashopen] class TestHashTable_InMemory(TestBSDDB): fname = None openmethod = [bsddb.hashopen] ## # (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85 ## # appears broken... at least on ## # Solaris Intel - rmasse 1/97 def test_main(verbose=None): test_support.run_unittest( TestBTree, TestHashTable, TestBTree_InMemory, TestHashTable_InMemory, TestBTree_InMemory_Truncate, ) if __name__ == "__main__": test_main(verbose=True)
bsd-2-clause
evax/ansible-modules-core
system/authorized_key.py
55
15877
#!/usr/bin/python # -*- coding: utf-8 -*- """ Ansible module to add authorized_keys for ssh logins. (c) 2012, Brad Olson <brado@movedbylight.com> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ DOCUMENTATION = ''' --- module: authorized_key short_description: Adds or removes an SSH authorized key description: - Adds or removes an SSH authorized key for a user from a remote host. version_added: "0.5" options: user: description: - The username on the remote host whose authorized_keys file will be modified required: true default: null key: description: - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) required: true default: null path: description: - Alternate path to the authorized_keys file required: false default: "(homedir)+/.ssh/authorized_keys" version_added: "1.2" manage_dir: description: - Whether this module should manage the directory of the authorized key file. If set, the module will create the directory, as well as set the owner and permissions of an existing directory. Be sure to set C(manage_dir=no) if you are using an alternate directory for authorized_keys, as set with C(path), since you could lock yourself out of SSH access. See the example below. 
required: false choices: [ "yes", "no" ] default: "yes" version_added: "1.2" state: description: - Whether the given key (with the given key_options) should or should not be in the file required: false choices: [ "present", "absent" ] default: "present" key_options: description: - A string of ssh key options to be prepended to the key in the authorized_keys file required: false default: null version_added: "1.4" exclusive: description: - Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys can be specified in a single C(key) string value by separating them by newlines. - This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a single batch as mentioned above. required: false choices: [ "yes", "no" ] default: "no" version_added: "1.9" description: - "Adds or removes authorized keys for particular user accounts" author: "Brad Olson (@bradobro)" ''' EXAMPLES = ''' # Example using key data from a local file on the management machine - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" # Using github url as key source - authorized_key: user=charlie key=https://github.com/charlie.keys # Using alternate directory locations: - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" path='/etc/ssh/authorized_keys/charlie' manage_dir=no # Using with_file - name: Set up authorized_keys for the deploy user authorized_key: user=deploy key="{{ item }}" with_file: - public_keys/doe-jane - public_keys/doe-john # Using key_options: - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options='no-port-forwarding,from="10.0.1.1"' # Set up authorized_keys exclusively with one key - authorized_key: user=root key="{{ item }}" state=present exclusive=yes with_file: - public_keys/doe-jane ''' # Makes sure the public 
key line is present or absent in the user's .ssh/authorized_keys. # # Arguments # ========= # user = username # key = line to add to authorized_keys for user # path = path to the user's authorized_keys file (default: ~/.ssh/authorized_keys) # manage_dir = whether to create, and control ownership of the directory (default: true) # state = absent|present (default: present) # # see example in examples/playbooks import sys import os import pwd import os.path import tempfile import re import shlex class keydict(dict): """ a dictionary that maintains the order of keys as they are added """ # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class def __init__(self, *args, **kw): super(keydict,self).__init__(*args, **kw) self.itemlist = super(keydict,self).keys() def __setitem__(self, key, value): self.itemlist.append(key) super(keydict,self).__setitem__(key, value) def __iter__(self): return iter(self.itemlist) def keys(self): return self.itemlist def values(self): return [self[key] for key in self] def itervalues(self): return (self[key] for key in self) def keyfile(module, user, write=False, path=None, manage_dir=True): """ Calculate name of authorized keys file, optionally creating the directories and file, properly setting permissions. 
:param str user: name of user in passwd file :param bool write: if True, write changes to authorized_keys file (creating directories if needed) :param str path: if not None, use provided path rather than default of '~user/.ssh/authorized_keys' :param bool manage_dir: if True, create and set ownership of the parent dir of the authorized_keys file :return: full path string to authorized_keys for user """ if module.check_mode and path is not None: keysfile = path return keysfile try: user_entry = pwd.getpwnam(user) except KeyError, e: if module.check_mode and path is None: module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode") module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e))) if path is None: homedir = user_entry.pw_dir sshdir = os.path.join(homedir, ".ssh") keysfile = os.path.join(sshdir, "authorized_keys") else: sshdir = os.path.dirname(path) keysfile = path if not write: return keysfile uid = user_entry.pw_uid gid = user_entry.pw_gid if manage_dir: if not os.path.exists(sshdir): os.mkdir(sshdir, 0700) if module.selinux_enabled(): module.set_default_selinux_context(sshdir, False) os.chown(sshdir, uid, gid) os.chmod(sshdir, 0700) if not os.path.exists(keysfile): basedir = os.path.dirname(keysfile) if not os.path.exists(basedir): os.makedirs(basedir) try: f = open(keysfile, "w") #touches file so we can set ownership and perms finally: f.close() if module.selinux_enabled(): module.set_default_selinux_context(keysfile, False) try: os.chown(keysfile, uid, gid) os.chmod(keysfile, 0600) except OSError: pass return keysfile def parseoptions(module, options): ''' reads a string containing ssh-key options and returns a dictionary of those options ''' options_dict = keydict() #ordered dict if options: try: # the following regex will split on commas while # ignoring those commas that fall within quotes regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') parts = regex.split(options)[1:-1] for part in parts: 
if "=" in part: (key, value) = part.split("=", 1) options_dict[key] = value elif part != ",": options_dict[part] = None except: module.fail_json(msg="invalid option string: %s" % options) return options_dict def parsekey(module, raw_key): ''' parses a key, which may or may not contain a list of ssh-key options at the beginning ''' VALID_SSH2_KEY_TYPES = [ 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', 'ecdsa-sha2-nistp521', 'ssh-dss', 'ssh-rsa', ] options = None # connection options key = None # encrypted key string key_type = None # type of ssh key type_index = None # index of keytype in key string|list # remove comment yaml escapes raw_key = raw_key.replace('\#', '#') # split key safely lex = shlex.shlex(raw_key) lex.quotes = [] lex.commenters = '' #keep comment hashes lex.whitespace_split = True key_parts = list(lex) for i in range(0, len(key_parts)): if key_parts[i] in VALID_SSH2_KEY_TYPES: type_index = i key_type = key_parts[i] break # check for options if type_index is None: return None elif type_index > 0: options = " ".join(key_parts[:type_index]) # parse the options (if any) options = parseoptions(module, options) # get key after the type index key = key_parts[(type_index + 1)] # set comment to everything after the key if len(key_parts) > (type_index + 1): comment = " ".join(key_parts[(type_index + 2):]) return (key, key_type, options, comment) def readkeys(module, filename): if not os.path.isfile(filename): return {} keys = {} f = open(filename) for line in f.readlines(): key_data = parsekey(module, line) if key_data: # use key as identifier keys[key_data[0]] = key_data else: # for an invalid line, just append the line # to the array so it will be re-output later keys[line] = line f.close() return keys def writekeys(module, filename, keys): fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename)) f = open(tmp_path,"w") try: for index, key in keys.items(): try: (keyhash,type,options,comment) = key option_str = "" if options: 
option_strings = [] for option_key in options.keys(): if options[option_key]: option_strings.append("%s=%s" % (option_key, options[option_key])) else: option_strings.append("%s" % option_key) option_str = ",".join(option_strings) option_str += " " key_line = "%s%s %s %s\n" % (option_str, type, keyhash, comment) except: key_line = key f.writelines(key_line) except IOError, e: module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) f.close() module.atomic_move(tmp_path, filename) def enforce_state(module, params): """ Add or remove key. """ user = params["user"] key = params["key"] path = params.get("path", None) manage_dir = params.get("manage_dir", True) state = params.get("state", "present") key_options = params.get("key_options", None) exclusive = params.get("exclusive", False) error_msg = "Error getting key from: %s" # if the key is a url, request it and use it as key source if key.startswith("http"): try: resp, info = fetch_url(module, key) if info['status'] != 200: module.fail_json(msg=error_msg % key) else: key = resp.read() except Exception: module.fail_json(msg=error_msg % key) # extract individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] # check current state -- just get the filename, don't create file do_write = False params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) existing_keys = readkeys(module, params["keyfile"]) # Add a place holder for keys that should exist in the state=present and # exclusive=true case keys_to_exist = [] # Check our new keys, if any of them exist we'll continue. 
for new_key in key: parsed_new_key = parsekey(module, new_key) if not parsed_new_key: module.fail_json(msg="invalid key specified: %s" % new_key) if key_options is not None: parsed_options = parseoptions(module, key_options) parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3]) present = False matched = False non_matching_keys = [] if parsed_new_key[0] in existing_keys: present = True # Then we check if everything matches, including # the key type and options. If not, we append this # existing key to the non-matching list # We only want it to match everything when the state # is present if parsed_new_key != existing_keys[parsed_new_key[0]] and state == "present": non_matching_keys.append(existing_keys[parsed_new_key[0]]) else: matched = True # handle idempotent state=present if state=="present": keys_to_exist.append(parsed_new_key[0]) if len(non_matching_keys) > 0: for non_matching_key in non_matching_keys: if non_matching_key[0] in existing_keys: del existing_keys[non_matching_key[0]] do_write = True if not matched: existing_keys[parsed_new_key[0]] = parsed_new_key do_write = True elif state=="absent": if not matched: continue del existing_keys[parsed_new_key[0]] do_write = True # remove all other keys to honor exclusive if state == "present" and exclusive: to_remove = frozenset(existing_keys).difference(keys_to_exist) for key in to_remove: del existing_keys[key] do_write = True if do_write: if module.check_mode: module.exit_json(changed=True) writekeys(module, keyfile(module, user, do_write, path, manage_dir), existing_keys) params['changed'] = True else: if module.check_mode: module.exit_json(changed=False) return params def main(): module = AnsibleModule( argument_spec = dict( user = dict(required=True, type='str'), key = dict(required=True, type='str'), path = dict(required=False, type='str'), manage_dir = dict(required=False, type='bool', default=True), state = dict(default='present', choices=['absent','present']), 
key_options = dict(required=False, type='str'), unique = dict(default=False, type='bool'), exclusive = dict(default=False, type='bool'), ), supports_check_mode=True ) results = enforce_state(module, module.params) module.exit_json(**results) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * main()
gpl-3.0
fparrel/regepe
vps/gpxparser.py
1
8080
# For xml parsing try: from etree.ElementTree import ElementTree except ImportError: from xml.etree.ElementTree import ElementTree # For date parsing from datetime import datetime import time # Model classes from model import Bounds,Point,Track #i18n from flask_babel import gettext # gpx creator="KeyMaze 500-700 PC Software" -> spdunit = hectometres/heure class GpxPoint: def __init__(self,trkptxmlelement,xmlns): "Create a Point from a gpx:pt xmlelement and the xml namespace" self.lat = float(trkptxmlelement.get('lat')) self.lon = float(trkptxmlelement.get('lon')) elestr = trkptxmlelement.findtext(xmlns+'ele') if not elestr==None: # replace , by . in case bad formating (MobiDream) self.ele = float(elestr.replace(',','.')) else: self.ele = None spdstr = trkptxmlelement.findtext(xmlns+'speed') if not spdstr==None: self.spd = float(spdstr.replace(',','.')) else: self.spd = None coursestr = trkptxmlelement.findtext(xmlns+'course') if not coursestr==None: self.course = float(coursestr.replace(',','.')) else: self.course = None datetimestr = trkptxmlelement.findtext(xmlns+'time') if not datetimestr==None: # date in format YY-mm-dd if datetimestr.find('T')==8: datetimestr = '20' + datetimestr datetimestr = datetimestr[:19] # Fix a GPS Action Replay bug if not datetimestr.find('Z')==-1: datetimestr = datetimestr[:datetimestr.find('Z')] try: # Python > 2.4 self.datetime = datetime.strptime(datetimestr,'%Y-%m-%dT%H:%M:%S') except AttributeError: try: # Python 2.4 self.datetime = datetime(*(time.strptime(datetimestr,'%Y-%m-%dT%H:%M:%S')[0:6])) except ValueError: raise Exception(gettext('Cannot convert date %s') % datetimestr) else: self.datetime = None for e in trkptxmlelement: if e.tag==xmlns+'extensions': for sube in e: if sube.tag.endswith('TrackPointExtension'): for subsube in sube: if subsube.tag.endswith('hr'): self.hr = int(subsube.text) def ToPoint(self): pt = Point(self.lat,self.lon,self.ele,self.spd,self.course,self.datetime) if hasattr(self,'hr'): pt.hr = self.hr 
return pt class GpxTrkSeg: "Keeps a Track Segement (list of points) got from a gpx file (trkseg tag)" # ptlist: list of points # bounds: bounds of the list of points def __init__(self,trksegxmlelement,xmlns): "Create a TrackSeg from a gpx:trkseg xmlelement and the xml namespace" self.bounds = Bounds() self.ptlist = [] for trkpt in trksegxmlelement: pt = GpxPoint(trkpt,xmlns) if not(pt.lat==0.0 and pt.lon==0.0): # Fix for Garmin Connect's bug self.ptlist.append(pt) self.bounds.Extend(pt.lat,pt.lon) def __add__(self,other): out = GpxTrkSeg([],'') out.ptlist.extend(self.ptlist) out.ptlist.extend(other.ptlist) for pt in out.ptlist: out.bounds.Extend(pt.lat,pt.lon) return out class GpxTrack: "Keeps a Track got from a gpx file (trk tag)" # trkseglist: list of track segment # name: name of the track # bounds: bounds of track def __init__(self,trkxmlelement,xmlns): self.bounds = Bounds() self.trkseglist = [] index = 0 for e in trkxmlelement: if e.tag==xmlns+'name': self.name = e.text if e.tag==xmlns+'trkseg': index = index + 1 trackseg = GpxTrkSeg(e,xmlns) self.trkseglist.append(trackseg) self.bounds.Extend(trackseg.bounds) class GpxRoutePoint: # lat # lon # ele # name def __init__(self,rteptxmlelement,xmlns): "Create a Route Point from a gpx:rtept or a gpx:wpt xmlelement and the xml namespace" self.lat = float(rteptxmlelement.get('lat')) self.lon = float(rteptxmlelement.get('lon')) elestr = rteptxmlelement.findtext(xmlns+'ele') if not elestr==None: # replace , by . 
in case bad formating (MobiDream) self.ele = float(elestr.replace(',','.')) else: self.ele = None self.name = rteptxmlelement.findtext(xmlns+'name') def ToPoint(self): return Point(self.lat,self.lon,self.ele,None,None,None) def __str__(self): return 'GpxRoutePoint(%f,%f,%f,%s)'%(self.lat,self.lon,self.ele,self.name) class GpxRoute: "Keeps a Route got from a gpx file (rte tag)" # ptlist: list of GpxRoutePt # name: name of the route # bounds: bounds of route def __init__(self,rtexmlelement,xmlns): self.ptlist = [] self.bounds = Bounds() for e in rtexmlelement: if e.tag==xmlns+'name': self.name = e.text elif e.tag==xmlns+'rtept': pt = GpxRoutePoint(e,xmlns) self.ptlist.append(pt) self.bounds.Extend(pt.lat,pt.lon) class GpxWpts: def __init__(self,rtexmlelement,xmlns): self.ptlist = [] self.bounds = Bounds() for e in rtexmlelement: if e.tag==xmlns+'wpt': pt = GpxRoutePoint(e,xmlns) self.ptlist.append(pt) self.bounds.Extend(pt.lat,pt.lon) class GpxFile: "Keeps data contained in a gpx file (gpx tag)" # tracklist: list of GpxTrack # routelist: list of GpxRoute # bounds: bounds of gpx file def ParseBounds(self,boundsxmlelement): self.bounds.minlat = float(boundsxmlelement.get('minlat')) self.bounds.maxlat = float(boundsxmlelement.get('maxlat')) self.bounds.minlon = float(boundsxmlelement.get('minlon')) self.bounds.maxlon = float(boundsxmlelement.get('maxlon')) def __init__(self,gpxxmlelement,xmlns): self.bounds = Bounds() self.tracklist = [] self.routelist = [] for e in gpxxmlelement: if e.tag==xmlns+'bounds': self.ParseBounds(e) if e.tag==xmlns+'trk': track = GpxTrack(e,xmlns) self.tracklist.append(track) self.bounds.Extend(track.bounds) if e.tag==xmlns+'rte': route = GpxRoute(e,xmlns) self.routelist.append(route) if e.tag==xmlns+'wpt': route = GpxWpts(gpxxmlelement,xmlns) self.routelist.append(route) def ParseGpxFile(inputfile,trk_id,trk_seg_id): tree = ElementTree() tree.parse(inputfile) xmlns = str(tree.getroot()) xmlns = xmlns[xmlns.find('{'):xmlns.find('}')+1] gpx = 
GpxFile(tree.getroot(),xmlns) if len(gpx.tracklist)<1: # Try with <rte> if len(gpx.routelist)<1: raise Exception(gettext('No track found in file')) else: return map(GpxRoutePoint.ToPoint,gpx.routelist[trk_id].ptlist) return map(GpxPoint.ToPoint,reduce(lambda x,y:x+y,gpx.tracklist[trk_id].trkseglist).ptlist) #return map(GpxPoint.ToPoint,gpx.tracklist[trk_id].trkseglist[trk_seg_id].ptlist) ## UNIT TEST CODE ## def main(): #ptlist = ParseGpxFile('D:/Documents/Downloads/Racemment_importa_de_JAN0712_181820.GPX',0,0) ptlist = ParseGpxFile('D:/Userfiles/fparrel/Downloads/2015-01-12 1951__20150112_1951.gpx',0,0) #ptlist = ParseGpxFile('gpx/FPARREL_832004951_20091022_172903.gpx',0,0) #ptlist = ParseGpxFile('Test.gpx',0,0) for pt in ptlist: print pt if hasattr(pt,'hr'): print pt.hr #raw_input('Press Enter') if __name__ == '__main__': main()
gpl-3.0
ar7z1/ansible
lib/ansible/modules/packaging/os/pacman.py
36
14590
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2012, Afterburn <https://github.com/afterburn> # Copyright: (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com> # Copyright: (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: pacman short_description: Manage packages with I(pacman) description: - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. version_added: "1.0" author: - Indrajit Raychaudhuri (@indrajitr) - Aaron Bull Schaefer (@elasticdog) <aaron@elasticdog.com> - Afterburn options: name: description: - Name or list of names of the packages to install, upgrade, or remove. aliases: [ package, pkg ] state: description: - Desired state of the package. default: present choices: [ absent, latest, present ] recurse: description: - When removing a package, also remove its dependencies, provided that they are not required by other packages and were not explicitly installed by a user. type: bool default: no version_added: "1.3" force: description: - When removing package - force remove package, without any checks. When update_cache - force redownload repo databases. type: bool default: no version_added: "2.0" update_cache: description: - Whether or not to refresh the master package lists. This can be run as part of a package installation or as a separate step. type: bool default: no aliases: [ update-cache ] upgrade: description: - Whether or not to upgrade whole system. type: bool default: no version_added: "2.0" notes: - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. 
''' RETURN = ''' packages: description: a list of packages that have been changed returned: when upgrade is set to yes type: list sample: [ package, other-package ] ''' EXAMPLES = ''' - name: Install package foo pacman: name: foo state: present - name: Upgrade package foo pacman: name: foo state: latest update_cache: yes - name: Remove packages foo and bar pacman: name: foo,bar state: absent - name: Recursively remove package baz pacman: name: baz state: absent recurse: yes - name: Run the equivalent of "pacman -Sy" as a separate step pacman: update_cache: yes - name: Run the equivalent of "pacman -Su" as a separate step pacman: upgrade: yes - name: Run the equivalent of "pacman -Syu" as a separate step pacman: update_cache: yes upgrade: yes - name: Run the equivalent of "pacman -Rdd", force remove package baz pacman: name: baz state: absent force: yes ''' import re from ansible.module_utils.basic import AnsibleModule def get_version(pacman_output): """Take pacman -Qi or pacman -Si output and get the Version""" lines = pacman_output.split('\n') for line in lines: if 'Version' in line: return line.split(':')[1].strip() return None def query_package(module, pacman_path, name, state="present"): """Query the package status in both the local system and the repository. 
Returns a boolean to indicate if the package is installed, a second boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available """ if state == "present": lcmd = "%s -Qi %s" % (pacman_path, name) lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) if lrc != 0: # package is not installed locally return False, False, False # get the version installed locally (if any) lversion = get_version(lstdout) rcmd = "%s -Si %s" % (pacman_path, name) rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) # get the version in the repository rversion = get_version(rstdout) if rrc == 0: # Return True to indicate that the package is installed locally, and the result of the version number comparison # to determine if the package is up-to-date. return True, (lversion == rversion), False # package is installed but cannot fetch remote Version. Last True stands for the error return True, True, True def update_package_db(module, pacman_path): if module.params["force"]: args = "Syy" else: args = "Sy" cmd = "%s -%s" % (pacman_path, args) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: return True else: module.fail_json(msg="could not update package db") def upgrade(module, pacman_path): cmdupgrade = "%s -Suq --noconfirm" % (pacman_path) cmdneedrefresh = "%s -Qu" % (pacman_path) rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False) data = stdout.split('\n') data.remove('') packages = [] diff = { 'before': '', 'after': '', } if rc == 0: regex = re.compile(r'([\w-]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))') for p in data: m = regex.search(p) packages.append(m.group(1)) if module._diff: diff['before'] += "%s-%s\n" % (m.group(1), m.group(2)) diff['after'] += "%s-%s\n" % (m.group(1), m.group(3)) if module.check_mode: module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff) rc, stdout, stderr = 
module.run_command(cmdupgrade, check_rc=False) if rc == 0: module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff) else: module.fail_json(msg="Could not upgrade") else: module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) def remove_packages(module, pacman_path, packages): data = [] diff = { 'before': '', 'after': '', } if module.params["recurse"] or module.params["force"]: if module.params["recurse"]: args = "Rs" if module.params["force"]: args = "Rdd" if module.params["recurse"] and module.params["force"]: args = "Rdds" else: args = "R" remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove installed, updated, unknown = query_package(module, pacman_path, package) if not installed: continue cmd = "%s -%s %s --noconfirm --noprogressbar" % (pacman_path, args, package) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to remove %s" % (package)) if module._diff: d = stdout.split('\n')[2].split(' ')[2:] for i, pkg in enumerate(d): d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1]) diff['before'] += "%s\n" % pkg data.append('\n'.join(d)) remove_c += 1 if remove_c > 0: module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff) module.exit_json(changed=False, msg="package(s) already absent") def install_packages(module, pacman_path, state, packages, package_files): install_c = 0 package_err = [] message = "" data = [] diff = { 'before': '', 'after': '', } to_install_repos = [] to_install_files = [] for i, package in enumerate(packages): # if the package is installed and state == present or state == latest and is up-to-date then skip installed, updated, latestError = query_package(module, pacman_path, package) if latestError and state == 'latest': package_err.append(package) if installed and (state == 'present' or (state == 
'latest' and updated)): continue if package_files[i]: to_install_files.append(package_files[i]) else: to_install_repos.append(package) if to_install_repos: cmd = "%s -S %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_repos)) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr)) data = stdout.split('\n')[3].split(' ')[2:] data = [i for i in data if i != ''] for i, pkg in enumerate(data): data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1]) if module._diff: diff['after'] += "%s\n" % pkg install_c += len(to_install_repos) if to_install_files: cmd = "%s -U %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_files)) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr)) data = stdout.split('\n')[3].split(' ')[2:] data = [i for i in data if i != ''] for i, pkg in enumerate(data): data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1]) if module._diff: diff['after'] += "%s\n" % pkg install_c += len(to_install_files) if state == 'latest' and len(package_err) > 0: message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err) if install_c > 0: module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff) module.exit_json(changed=False, msg="package(s) already installed. 
%s" % (message), diff=diff) def check_packages(module, pacman_path, packages, state): would_be_changed = [] diff = { 'before': '', 'after': '', 'before_header': '', 'after_header': '' } for package in packages: installed, updated, unknown = query_package(module, pacman_path, package) if ((state in ["present", "latest"] and not installed) or (state == "absent" and installed) or (state == "latest" and not updated)): would_be_changed.append(package) if would_be_changed: if state == "absent": state = "removed" if module._diff and (state == 'removed'): diff['before_header'] = 'removed' diff['before'] = '\n'.join(would_be_changed) + '\n' elif module._diff and ((state == 'present') or (state == 'latest')): diff['after_header'] = 'installed' diff['after'] = '\n'.join(would_be_changed) + '\n' module.exit_json(changed=True, msg="%s package(s) would be %s" % ( len(would_be_changed), state), diff=diff) else: module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff) def expand_package_groups(module, pacman_path, pkgs): expanded = [] for pkg in pkgs: if pkg: # avoid empty strings cmd = "%s -Sgq %s" % (pacman_path, pkg) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: # A group was found matching the name, so expand it for name in stdout.split('\n'): name = name.strip() if name: expanded.append(name) else: expanded.append(pkg) return expanded def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='list', aliases=['package', 'pkg']), state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), recurse=dict(type='bool', default=False), force=dict(type='bool', default=False), upgrade=dict(type='bool', default=False), update_cache=dict(type='bool', default=False, aliases=['update-cache']), ), required_one_of=[['name', 'update_cache', 'upgrade']], supports_check_mode=True, ) pacman_path = module.get_bin_path('pacman', True) p = module.params # normalize the state parameter if 
p['state'] in ['present', 'installed']: p['state'] = 'present' elif p['state'] in ['absent', 'removed']: p['state'] = 'absent' if p["update_cache"] and not module.check_mode: update_package_db(module, pacman_path) if not (p['name'] or p['upgrade']): module.exit_json(changed=True, msg='Updated the package master lists') if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']): module.exit_json(changed=True, msg='Would have updated the package cache') if p['upgrade']: upgrade(module, pacman_path) if p['name']: pkgs = expand_package_groups(module, pacman_path, p['name']) pkg_files = [] for i, pkg in enumerate(pkgs): if not pkg: # avoid empty strings continue elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg): # The package given is a filename, extract the raw pkg name from # it and store the filename pkg_files.append(pkg) pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1]) else: pkg_files.append(None) if module.check_mode: check_packages(module, pacman_path, pkgs, p['state']) if p['state'] in ['present', 'latest']: install_packages(module, pacman_path, p['state'], pkgs, pkg_files) elif p['state'] == 'absent': remove_packages(module, pacman_path, pkgs) if __name__ == "__main__": main()
gpl-3.0
Ronak6892/servo
python/mach/mach/terminal.py
100
2061
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. """This file contains code for interacting with terminals. All the terminal interaction code is consolidated so the complexity can be in one place, away from code that is commonly looked at. """ from __future__ import absolute_import, print_function, unicode_literals import logging import sys class LoggingHandler(logging.Handler): """Custom logging handler that works with terminal window dressing. This is alternative terminal logging handler which contains smarts for emitting terminal control characters properly. Currently, it has generic support for "footer" elements at the bottom of the screen. Functionality can be added when needed. """ def __init__(self): logging.Handler.__init__(self) self.fh = sys.stdout self.footer = None def flush(self): self.acquire() try: self.fh.flush() finally: self.release() def emit(self, record): msg = self.format(record) if self.footer: self.footer.clear() self.fh.write(msg) self.fh.write('\n') if self.footer: self.footer.draw() # If we don't flush, the footer may not get drawn. self.flush() class TerminalFooter(object): """Represents something drawn on the bottom of a terminal.""" def __init__(self, terminal): self.t = terminal self.fh = sys.stdout def _clear_lines(self, n): for i in xrange(n): self.fh.write(self.t.move_x(0)) self.fh.write(self.t.clear_eol()) self.fh.write(self.t.move_up()) self.fh.write(self.t.move_down()) self.fh.write(self.t.move_x(0)) def clear(self): raise Exception('clear() must be implemented.') def draw(self): raise Exception('draw() must be implemented.')
mpl-2.0
gnmiller/craig-bot
craig-bot/lib/python3.6/site-packages/pip/_internal/utils/temp_dir.py
21
5339
from __future__ import absolute_import

import errno
import itertools
import logging
import os.path
import tempfile

from pip._internal.utils.misc import rmtree

logger = logging.getLogger(__name__)


class TempDirectory(object):
    """Helper class that owns and cleans up a temporary directory.

    This class can be used as a context manager or as an OO representation of a
    temporary directory.

    Attributes:
        path
            Location to the created temporary directory or None
        delete
            Whether the directory should be deleted when exiting
            (when used as a contextmanager)

    Methods:
        create()
            Creates a temporary directory and stores its path in the path
            attribute.
        cleanup()
            Deletes the temporary directory and sets path attribute to None

    When used as a context manager, a temporary directory is created on
    entering the context and, if the delete attribute is True, on exiting the
    context the created directory is deleted.
    """

    def __init__(self, path=None, delete=None, kind="temp"):
        super(TempDirectory, self).__init__()

        if path is None and delete is None:
            # If we were not given an explicit directory, and we were not given
            # an explicit delete option, then we'll default to deleting.
            delete = True

        self.path = path
        self.delete = delete
        # `kind` is only used to build a recognizable mkdtemp prefix
        # ("pip-<kind>-...") so leftover directories can be identified.
        self.kind = kind

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.path)

    def __enter__(self):
        self.create()
        return self

    def __exit__(self, exc, value, tb):
        # Only auto-remove the directory when deletion was requested
        # (or defaulted to True in __init__).
        if self.delete:
            self.cleanup()

    def create(self):
        """Create a temporary directory and store its path in self.path
        """
        if self.path is not None:
            # Idempotent: a directory was already created or supplied.
            logger.debug(
                "Skipped creation of temporary directory: {}".format(self.path)
            )
            return
        # We realpath here because some systems have their default tmpdir
        # symlinked to another directory.  This tends to confuse build
        # scripts, so we canonicalize the path by traversing potential
        # symlinks here.
        self.path = os.path.realpath(
            tempfile.mkdtemp(prefix="pip-{}-".format(self.kind))
        )
        logger.debug("Created temporary directory: {}".format(self.path))

    def cleanup(self):
        """Remove the temporary directory created and reset state
        """
        if self.path is not None and os.path.exists(self.path):
            rmtree(self.path)
        # Reset so a subsequent create() builds a fresh directory.
        self.path = None


class AdjacentTempDirectory(TempDirectory):
    """Helper class that creates a temporary directory adjacent to a real one.

    Attributes:
        original
            The original directory to create a temp directory for.
        path
            After calling create() or entering, contains the full path to the
            temporary directory.
        delete
            Whether the directory should be deleted when exiting
            (when used as a contextmanager)
    """
    # The characters that may be used to name the temp directory
    # We always prepend a ~ and then rotate through these until
    # a usable name is found.
    # pkg_resources raises a different error for .dist-info folder
    # with leading '-' and invalid metadata
    LEADING_CHARS = "-~.=%0123456789"

    def __init__(self, original, delete=None):
        super(AdjacentTempDirectory, self).__init__(delete=delete)
        # Strip trailing separators so os.path.split() in create() yields
        # the directory's own name, not an empty component.
        self.original = original.rstrip('/\\')

    @classmethod
    def _generate_names(cls, name):
        """Generates a series of temporary names.

        The algorithm replaces the leading characters in the name
        with ones that are valid filesystem characters, but are not
        valid package names (for both Python and pip definitions of
        package).
        """
        # First pass: same-length candidates — replace a progressively longer
        # leading run of `name` with '~' plus LEADING_CHARS combinations.
        for i in range(1, len(name)):
            for candidate in itertools.combinations_with_replacement(
                    cls.LEADING_CHARS, i - 1):
                new_name = '~' + ''.join(candidate) + name[i:]
                if new_name != name:
                    yield new_name

        # If we make it this far, we will have to make a longer name
        for i in range(len(cls.LEADING_CHARS)):
            for candidate in itertools.combinations_with_replacement(
                    cls.LEADING_CHARS, i):
                new_name = '~' + ''.join(candidate) + name
                if new_name != name:
                    yield new_name

    def create(self):
        """Try each candidate sibling name; fall back to a system temp dir."""
        root, name = os.path.split(self.original)
        for candidate in self._generate_names(name):
            path = os.path.join(root, candidate)
            try:
                os.mkdir(path)
            except OSError as ex:
                # Continue if the name exists already
                if ex.errno != errno.EEXIST:
                    raise
            else:
                self.path = os.path.realpath(path)
                break

        # NOTE(review): this relies on self.path still being None (falsy) when
        # every candidate collided — set by the base __init__ default.
        if not self.path:
            # Final fallback on the default behavior.
            self.path = os.path.realpath(
                tempfile.mkdtemp(prefix="pip-{}-".format(self.kind))
            )

        logger.debug("Created temporary directory: {}".format(self.path))
mit
oubiwann-unsupported/pyrrd
pyrrd/backend/tests/test_external.py
3
3082
from cStringIO import StringIO import os import sys import tempfile from unittest import TestCase from pyrrd.exceptions import ExternalCommandError from pyrrd.rrd import DataSource, RRA, RRD class ExternalBackendTestCase(TestCase): def setUp(self): ds = [ DataSource(dsName="speed", dsType="COUNTER", heartbeat=600)] rra = [ RRA(cf="AVERAGE", xff=0.5, steps=1, rows=24), RRA(cf="AVERAGE", xff=0.5, steps=6, rows=10)] self.rrdfile = tempfile.NamedTemporaryFile() self.rrd = RRD(self.rrdfile.name, ds=ds, rra=rra, start=920804400) self.rrd.create() def test_updateError(self): self.rrd.bufferValue(1261214678, 612) self.rrd.bufferValue(1261214678, 612) self.assertRaises(ExternalCommandError, self.rrd.update) expected = ("illegal attempt to update using time 1261214678 " "when last update time is 1261214678 (minimum one second " "step)") try: self.rrd.update() except ExternalCommandError, error: self.assertTrue(str(error).startswith("ERROR:")) self.assertTrue(str(error).endswith(expected)) def test_infoWriteMode(self): expectedOutput = """ rra = [{'rows': 24, 'database': None, 'cf': 'AVERAGE', 'cdp_prep': None, 'beta': None, 'seasonal_period': None, 'steps': 1, 'window_length': None, 'threshold': None, 'alpha': None, 'pdp_per_row': None, 'xff': 0.5, 'ds': [], 'gamma': None, 'rra_num': None}, {'rows': 10, 'database': None, 'cf': 'AVERAGE', 'cdp_prep': None, 'beta': None, 'seasonal_period': None, 'steps': 6, 'window_length': None, 'threshold': None, 'alpha': None, 'pdp_per_row': None, 'xff': 0.5, 'ds': [], 'gamma': None, 'rra_num': None}] filename = /tmp/tmpQCLRj0 start = 920804400 step = 300 values = [] ds = [{'name': 'speed', 'min': 'U', 'max': 'U', 'unknown_sec': None, 'minimal_heartbeat': 600, 'value': None, 'rpn': None, 'type': 'COUNTER', 'last_ds': None}] ds[speed].name = speed ds[speed].min = U ds[speed].max = U ds[speed].minimal_heartbeat = 600 ds[speed].type = COUNTER rra[0].rows = 24 rra[0].cf = AVERAGE rra[0].steps = 1 rra[0].xff = 0.5 rra[0].ds = [] rra[1].rows = 10 
rra[1].cf = AVERAGE rra[1].steps = 6 rra[1].xff = 0.5 rra[1].ds = [] """.strip().split("\n") originalStdout = sys.stdout sys.stdout = StringIO() self.assertTrue(os.path.exists(self.rrdfile.name)) self.rrd.info() for obtained, expected in zip( sys.stdout.getvalue().split("\n"), expectedOutput): if obtained.startswith("filename"): self.assertTrue(expected.strip().startswith("filename")) else: self.assertEquals(obtained.strip(), expected.strip()) sys.stdout = originalStdout
bsd-3-clause
kvar/ansible
lib/ansible/module_utils/heroku.py
47
1223
# Copyright: (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import absolute_import, division, print_function __metaclass__ = type import traceback from ansible.module_utils.basic import env_fallback, missing_required_lib HAS_HEROKU = False HEROKU_IMP_ERR = None try: import heroku3 HAS_HEROKU = True except ImportError: HEROKU_IMP_ERR = traceback.format_exc() class HerokuHelper(): def __init__(self, module): self.module = module self.check_lib() self.api_key = module.params["api_key"] def check_lib(self): if not HAS_HEROKU: self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR) @staticmethod def heroku_argument_spec(): return dict( api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True)) def get_heroku_client(self): client = heroku3.from_key(self.api_key) if not client.is_authenticated: self.module.fail_json(msg='Heroku authentication failure, please check your API Key') return client
gpl-3.0
Outernet-Project/outernet-broadman
broadman/db.py
1
1973
"""
Functions for working with broadcast database

Copyright 2015, Outernet Inc.
Some rights reserved.

This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""

import sqlite3
import datetime

import sqlize as sql

from . import path

OperationalError = sqlite3.OperationalError
ProgrammingError = sqlite3.ProgrammingError


class DB:
    """Thin wrapper around the broadcasts SQLite database.

    BUGFIX: ``add_content()`` and ``remove_content()`` used to close the
    connection after committing, which made every ``DB`` instance single-use
    (a second call raised ``sqlite3.ProgrammingError``). The connection now
    stays open; call :meth:`close` explicitly or use the instance as a
    context manager when you are done.
    """

    TABLE = 'broadcasts'
    SCHEMA = """
    create table if not exists broadcasts
    (
        content_id text,
        server_id text,
        commit_hash text,
        title text,
        url text,
        size integer,
        collected timestamp,
        packed timestamp,
        aired timestamp,
        removed timestamp,
        expires timestamp
    );
    """

    def __init__(self, db=path.BROADCAST):
        """Open (or create) the database at ``db`` and ensure the schema."""
        self.con = sqlite3.connect(db)
        # Row factory gives dict-like access to result columns.
        self.con.row_factory = sqlite3.Row
        self.create_table()

    def create_table(self):
        """Create the broadcasts table if it does not already exist."""
        self.con.executescript(self.SCHEMA)

    def add_content(self, id, server, commit, title, url, size, collected,
                    packed, aired, expires=None):
        """Insert one broadcast record and commit.

        ``collected``/``packed``/``aired``/``expires`` are timestamps;
        ``expires`` may be None for content with no expiry.
        """
        q = sql.Insert(self.TABLE, cols=(
            'content_id', 'server_id', 'commit_hash', 'title', 'url', 'size',
            'collected', 'packed', 'aired', 'expires'))
        self.con.execute(str(q), {
            'content_id': id,
            'server_id': server,
            'commit_hash': commit,
            'title': title,
            'url': url,
            'size': size,
            'collected': collected,
            'packed': packed,
            'aired': aired,
            'expires': expires,
        })
        self.con.commit()

    def remove_content(self, id):
        """Mark the content with ``id`` as removed (sets ``removed`` to now)."""
        q = sql.Update(self.TABLE, 'content_id=:id', removed=':time')
        self.con.execute(str(q), {'id': id, 'time': datetime.datetime.today()})
        self.con.commit()

    def close(self):
        """Close the underlying database connection."""
        self.con.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the connection when leaving the context.
        self.close()
gpl-3.0
vstinner/pyperf
pyperf/_timeit.py
2
6742
import itertools import sys import time import traceback import pyperf PYPY = (pyperf.python_implementation() == 'pypy') DUMMY_SRC_NAME = "<timeit-src>" # Don't change the indentation of the template; the reindent() calls # in Timer.__init__() depend on setup being indented 4 spaces and stmt # being indented 8 spaces. TEMPLATE = """ def inner(_it, _timer{init}): {setup} _t0 = _timer() for _i in _it: {stmt} _t1 = _timer() {teardown} return _t1 - _t0 """ PYPY_TEMPLATE = """ def inner(_it, _timer{init}): {setup} _t0 = _timer() while _it > 0: _it -= 1 {stmt} _t1 = _timer() {teardown} return _t1 - _t0 """ def reindent(src, indent): return src.replace("\n", "\n" + " " * indent) class Timer: def __init__(self, stmt="pass", setup="pass", teardown="pass", globals=None): self.local_ns = {} self.global_ns = {} if globals is None else globals self.filename = DUMMY_SRC_NAME init = '' if isinstance(setup, str): # Check that the code can be compiled outside a function compile(setup, self.filename, "exec") full = setup + '\n' setup = reindent(setup, 4) elif callable(setup): self.local_ns['_setup'] = setup init += ', _setup=_setup' full = '' setup = '_setup()' else: raise ValueError("setup is neither a string nor callable") if isinstance(stmt, str): # Check that the code can be compiled outside a function compile(full + stmt, self.filename, "exec") full = full + stmt + '\n' stmt = reindent(stmt, 8) elif callable(stmt): self.local_ns['_stmt'] = stmt init += ', _stmt=_stmt' full = '' stmt = '_stmt()' else: raise ValueError("stmt is neither a string nor callable") if isinstance(teardown, str): # Check that the code can be compiled outside a function compile(full + teardown, self.filename, "exec") teardown = reindent(teardown, 4) elif callable(teardown): self.local_ns['_teardown'] = teardown init += ', _teardown=_teardown' teardown = '_teardown()' else: raise ValueError("teardown is neither a string nor callable") if PYPY: template = PYPY_TEMPLATE else: template = TEMPLATE src = 
template.format(stmt=stmt, setup=setup, init=init, teardown=teardown) self.src = src # Save for traceback display def make_inner(self): # PyPy tweak: recompile the source code each time before # calling inner(). There are situations like Issue #1776 # where PyPy tries to reuse the JIT code from before, # but that's not going to work: the first thing the # function does is the "-s" statement, which may declare # new classes (here a namedtuple). We end up with # bridges from the inner loop; more and more of them # every time we call inner(). code = compile(self.src, self.filename, "exec") global_ns = dict(self.global_ns) local_ns = dict(self.local_ns) exec(code, global_ns, local_ns) return local_ns["inner"] def update_linecache(self, file=None): import linecache linecache.cache[self.filename] = (len(self.src), None, self.src.split("\n"), self.filename) def time_func(self, loops): inner = self.make_inner() timer = time.perf_counter if not PYPY: it = itertools.repeat(None, loops) return inner(it, timer) else: # PyPy return inner(loops, timer) def strip_statements(statements): result = [] for stmt in statements: stmt = stmt.rstrip() if stmt: result.append(stmt) return result def format_statements(statements): return ' '.join(repr(stmt) for stmt in statements) def create_timer(stmt, setup, teardown, globals): # Include the current directory, so that local imports work (sys.path # contains the directory of this script, rather than the current # directory) import os sys.path.insert(0, os.curdir) stmt = "\n".join(stmt) setup = "\n".join(setup) teardown = "\n".join(teardown) return Timer(stmt, setup, teardown, globals=globals) def display_error(timer, stmt, setup, teardown): print("Error when running timeit benchmark:") print() print("Statement:") for expr in stmt: print(repr(expr)) print() if setup: print("Setup:") for expr in setup: print(repr(expr)) print() if teardown: print("Teardown:") for expr in teardown: print(repr(expr)) print() if timer is not None: 
timer.update_linecache() traceback.print_exc() def bench_timeit(runner, name, stmt, setup, teardown, inner_loops=None, duplicate=None, func_metadata=None, globals=None): if isinstance(stmt, str): stmt = (stmt,) if isinstance(setup, str): setup = (setup,) if isinstance(teardown, str): teardown = (teardown,) stmt = strip_statements(stmt) setup = strip_statements(setup) teardown = strip_statements(teardown) if not stmt: raise ValueError("need at least one statement") metadata = {} if func_metadata: metadata.update(func_metadata) if setup: metadata['timeit_setup'] = format_statements(setup) if teardown: metadata['timeit_teardown'] = format_statements(teardown) metadata['timeit_stmt'] = format_statements(stmt) orig_stmt = stmt # args must not be modified, it's passed to the worker process, # so use local variables. if duplicate and duplicate > 1: stmt = stmt * duplicate if inner_loops: inner_loops *= duplicate else: inner_loops = duplicate metadata['timeit_duplicate'] = duplicate kwargs = {'metadata': metadata} if inner_loops: kwargs['inner_loops'] = inner_loops timer = None try: timer = create_timer(stmt, setup, teardown, globals) runner.bench_time_func(name, timer.time_func, **kwargs) except SystemExit: raise except: # noqa: E722 display_error(timer, orig_stmt, setup, teardown) sys.exit(1)
mit
joshblum/django-with-audit
tests/regressiontests/httpwrappers/tests.py
24
14176
import copy
import pickle

from django.core.exceptions import SuspiciousOperation
from django.http import (QueryDict, HttpResponse, HttpResponseRedirect,
                         HttpResponsePermanentRedirect, SimpleCookie,
                         BadHeaderError, parse_cookie)
from django.utils import unittest

# NOTE(review): Python 2 era test suite (u'' literals, dict.has_key, unichr,
# iterator .next()) — do not modernize without porting the code under test.


class QueryDictTests(unittest.TestCase):
    """Regression tests for django.http.QueryDict (immutability, multi-value
    keys, encoding handling, pickling)."""

    def test_missing_key(self):
        q = QueryDict('')
        self.assertRaises(KeyError, q.__getitem__, 'foo')

    def test_immutability(self):
        # A QueryDict built from a string is immutable: every mutator raises.
        q = QueryDict('')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)

    def test_immutable_get_with_default(self):
        q = QueryDict('')
        self.assertEqual(q.get('foo', 'default'), 'default')

    def test_immutable_basic_operations(self):
        # Read-only operations must all work on an empty immutable QueryDict.
        q = QueryDict('')
        self.assertEqual(q.getlist('foo'), [])
        self.assertEqual(q.has_key('foo'), False)
        self.assertEqual('foo' in q, False)
        self.assertEqual(q.items(), [])
        self.assertEqual(q.lists(), [])
        self.assertEqual(q.items(), [])
        self.assertEqual(q.keys(), [])
        self.assertEqual(q.values(), [])
        self.assertEqual(len(q), 0)
        self.assertEqual(q.urlencode(), '')

    def test_single_key_value(self):
        """Test QueryDict with one key/value pair"""
        q = QueryDict('foo=bar')
        self.assertEqual(q['foo'], 'bar')
        self.assertRaises(KeyError, q.__getitem__, 'bar')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')

        self.assertEqual(q.get('foo', 'default'), 'bar')
        self.assertEqual(q.get('bar', 'default'), 'default')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertEqual(q.getlist('bar'), [])

        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])

        self.assertTrue(q.has_key('foo'))
        self.assertTrue('foo' in q)
        self.assertFalse(q.has_key('bar'))
        self.assertFalse('bar' in q)

        self.assertEqual(q.items(), [(u'foo', u'bar')])
        self.assertEqual(q.lists(), [(u'foo', [u'bar'])])
        self.assertEqual(q.keys(), ['foo'])
        self.assertEqual(q.values(), ['bar'])
        self.assertEqual(len(q), 1)

        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')

        self.assertEqual(q.urlencode(), 'foo=bar')

    def test_urlencode(self):
        # urlencode() percent-escapes by default; `safe` exempts characters.
        q = QueryDict('', mutable=True)
        q['next'] = '/a&b/'
        self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
        q = QueryDict('', mutable=True)
        q['next'] = u'/t\xebst&key/'
        self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')

    def test_mutable_copy(self):
        """A copy of a QueryDict is mutable."""
        q = QueryDict('').copy()
        self.assertRaises(KeyError, q.__getitem__, "foo")
        q['name'] = 'john'
        self.assertEqual(q['name'], 'john')

    def test_mutable_delete(self):
        q = QueryDict('').copy()
        q['name'] = 'john'
        del q['name']
        self.assertFalse('name' in q)

    def test_basic_mutable_operations(self):
        q = QueryDict('').copy()
        q['name'] = 'john'
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.get('name', 'default'), 'john')
        self.assertEqual(q.getlist('name'), ['john'])
        self.assertEqual(q.getlist('foo'), [])

        q.setlist('foo', ['bar', 'baz'])
        # __getitem__/get return the LAST value of a multi-valued key.
        self.assertEqual(q.get('foo', 'default'), 'baz')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz'])

        q.appendlist('foo', 'another')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
        self.assertEqual(q['foo'], 'another')
        self.assertTrue(q.has_key('foo'))
        self.assertTrue('foo' in q)

        self.assertEqual(q.items(), [(u'foo', u'another'), (u'name', u'john')])
        self.assertEqual(q.lists(), [(u'foo', [u'bar', u'baz', u'another']),
                                     (u'name', [u'john'])])
        self.assertEqual(q.keys(), [u'foo', u'name'])
        self.assertEqual(q.values(), [u'another', u'john'])
        self.assertEqual(len(q), 2)

        # update() appends to the value list rather than replacing it.
        q.update({'foo': 'hello'})
        self.assertEqual(q['foo'], 'hello')
        self.assertEqual(q.get('foo', 'not available'), 'hello')
        self.assertEqual(q.getlist('foo'), [u'bar', u'baz', u'another', u'hello'])
        # pop() returns the whole value list.
        self.assertEqual(q.pop('foo'), [u'bar', u'baz', u'another', u'hello'])
        self.assertEqual(q.pop('foo', 'not there'), 'not there')
        self.assertEqual(q.get('foo', 'not there'), 'not there')
        self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
        self.assertEqual(q['foo'], 'bar')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertEqual(q.urlencode(), 'foo=bar&name=john')

        q.clear()
        self.assertEqual(len(q), 0)

    def test_multiple_keys(self):
        """Test QueryDict with two key/value pairs with same keys."""
        q = QueryDict('vote=yes&vote=no')

        # Scalar access returns the last value for the repeated key.
        self.assertEqual(q['vote'], u'no')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')

        self.assertEqual(q.get('vote', 'default'), u'no')
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.getlist('vote'), [u'yes', u'no'])
        self.assertEqual(q.getlist('foo'), [])

        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])

        self.assertEqual(q.has_key('vote'), True)
        self.assertEqual('vote' in q, True)
        self.assertEqual(q.has_key('foo'), False)
        self.assertEqual('foo' in q, False)
        self.assertEqual(q.items(), [(u'vote', u'no')])
        self.assertEqual(q.lists(), [(u'vote', [u'yes', u'no'])])
        self.assertEqual(q.keys(), [u'vote'])
        self.assertEqual(q.values(), [u'no'])
        self.assertEqual(len(q), 1)

        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
        self.assertRaises(AttributeError, q.__delitem__, 'vote')

    def test_invalid_input_encoding(self):
        """
        QueryDicts must be able to handle invalid input encoding (in this
        case, bad UTF-8 encoding).
        """
        # The invalid byte decodes to U+FFFD (replacement character).
        q = QueryDict('foo=bar&foo=\xff')
        self.assertEqual(q['foo'], u'\ufffd')
        self.assertEqual(q.getlist('foo'), [u'bar', u'\ufffd'])

    def test_pickle(self):
        # Round-trip through pickle protocol 2 must preserve equality.
        q = QueryDict('')
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q == q1, True)
        q = QueryDict('a=b&c=d')
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q == q1, True)
        q = QueryDict('a=b&c=d&a=1')
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q == q1, True)

    def test_update_from_querydict(self):
        """Regression test for #8278: QueryDict.update(QueryDict)"""
        x = QueryDict("a=1&a=2", mutable=True)
        y = QueryDict("a=3&a=4")
        x.update(y)
        self.assertEqual(x.getlist('a'), [u'1', u'2', u'3', u'4'])

    def test_non_default_encoding(self):
        """#13572 - QueryDict with a non-default encoding"""
        # 'sbb=one' is rot_13 for 'foo=bar': keys/values are decoded with the
        # given codec, but urlencode() re-encodes with the same codec.
        q = QueryDict('sbb=one', encoding='rot_13')
        self.assertEqual(q.encoding, 'rot_13')
        self.assertEqual(q.items(), [(u'foo', u'bar')])
        self.assertEqual(q.urlencode(), 'sbb=one')
        q = q.copy()
        self.assertEqual(q.encoding, 'rot_13')
        self.assertEqual(q.items(), [(u'foo', u'bar')])
        self.assertEqual(q.urlencode(), 'sbb=one')
        self.assertEqual(copy.copy(q).encoding, 'rot_13')
        self.assertEqual(copy.deepcopy(q).encoding, 'rot_13')


class HttpResponseTests(unittest.TestCase):
    """Tests for HttpResponse header handling, content coercion and
    redirect-scheme validation."""

    def test_unicode_headers(self):
        r = HttpResponse()

        # If we insert a unicode value it will be converted to an ascii
        r['value'] = u'test value'
        self.assertTrue(isinstance(r['value'], str))

        # An error is raised when a unicode object with non-ascii is assigned.
        self.assertRaises(UnicodeEncodeError, r.__setitem__, 'value', u't\xebst value')

        # An error is raised when a unicode object with non-ASCII format is
        # passed as initial mimetype or content_type.
        self.assertRaises(UnicodeEncodeError, HttpResponse,
                          content_type=u't\xebst value')

        # HttpResponse headers must be convertible to ASCII.
        self.assertRaises(UnicodeEncodeError, HttpResponse,
                          content_type=u't\xebst value')

        # The response also converts unicode keys to strings.
        r[u'test'] = 'testing key'
        l = list(r.items())
        l.sort()
        self.assertEqual(l[1], ('test', 'testing key'))

        # It will also raise errors for keys with non-ascii data.
        self.assertRaises(UnicodeEncodeError, r.__setitem__, u't\xebst key', 'value')

    def test_newlines_in_headers(self):
        # Bug #10188: Do not allow newlines in headers (CR or LF)
        r = HttpResponse()
        self.assertRaises(BadHeaderError, r.__setitem__, 'test\rstr', 'test')
        self.assertRaises(BadHeaderError, r.__setitem__, 'test\nstr', 'test')

    def test_dict_behavior(self):
        """
        Test for bug #14020: Make HttpResponse.get work like dict.get
        """
        r = HttpResponse()
        self.assertEqual(r.get('test'), None)

    def test_non_string_content(self):
        # Bug 16494: HttpResponse should behave consistently with non-strings
        r = HttpResponse(12345)
        self.assertEqual(r.content, '12345')

        # test content via property
        r = HttpResponse()
        r.content = 12345
        self.assertEqual(r.content, '12345')

    def test_iter_content(self):
        r = HttpResponse(['abc', 'def', 'ghi'])
        self.assertEqual(r.content, 'abcdefghi')

        # test iter content via property
        r = HttpResponse()
        r.content = ['idan', 'alex', 'jacob']
        self.assertEqual(r.content, 'idanalexjacob')
        r = HttpResponse()
        r.content = [1, 2, 3]
        self.assertEqual(r.content, '123')

        # test retrieval explicitly using iter and odd inputs
        r = HttpResponse()
        r.content = ['1', u'2', 3, unichr(1950)]
        result = []
        my_iter = r.__iter__()
        while True:
            try:
                result.append(my_iter.next())
            except StopIteration:
                break
        # '\xde\x9e' == unichr(1950).encode('utf-8')
        self.assertEqual(result, ['1', '2', '3', '\xde\x9e'])
        self.assertEqual(r.content, '123\xde\x9e')

        # with Content-Encoding header
        r = HttpResponse([1, 1, 2, 4, 8])
        r['Content-Encoding'] = 'winning'
        self.assertEqual(r.content, '11248')
        r.content = [unichr(1950), ]
        self.assertRaises(UnicodeEncodeError, getattr, r, 'content')

    def test_unsafe_redirect(self):
        # Redirects to non-HTTP schemes must be rejected outright.
        bad_urls = [
            'data:text/html,<script>window.alert("xss")</script>',
            'mailto:test@example.com',
            'file:///etc/passwd',
        ]
        for url in bad_urls:
            self.assertRaises(SuspiciousOperation,
                              HttpResponseRedirect, url)
            self.assertRaises(SuspiciousOperation,
                              HttpResponsePermanentRedirect, url)


class CookieTests(unittest.TestCase):
    """Tests for SimpleCookie encoding quirks and parse_cookie robustness."""

    def test_encode(self):
        """
        Test that we don't output tricky characters in encoded value
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        self.assertTrue(";" not in c.output().rstrip(';'))  # IE compat
        self.assertTrue("," not in c.output().rstrip(';'))  # Safari compat

    def test_decode(self):
        """
        Test that we can still preserve semi-colons and commas
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        c2 = SimpleCookie()
        c2.load(c.output())
        self.assertEqual(c['test'].value, c2['test'].value)

    def test_decode_2(self):
        """
        Test that we haven't broken normal encoding
        """
        c = SimpleCookie()
        c['test'] = "\xf0"
        c2 = SimpleCookie()
        c2.load(c.output())
        self.assertEqual(c['test'].value, c2['test'].value)

    def test_nonstandard_keys(self):
        """
        Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
        """
        self.assertTrue('good_cookie' in parse_cookie('good_cookie=yes;bad:cookie=yes').keys())

    def test_repeated_nonstandard_keys(self):
        """
        Test that a repeated non-standard name doesn't affect all cookies. Ticket #15852
        """
        self.assertTrue('good_cookie' in parse_cookie('a,=b; a,=c; good_cookie=yes').keys())

    def test_httponly_after_load(self):
        """
        Test that we can use httponly attribute on cookies that we load
        """
        c = SimpleCookie()
        c.load("name=val")
        c['name']['httponly'] = True
        self.assertTrue(c['name']['httponly'])
bsd-3-clause
crafty78/ansible
lib/ansible/modules/network/cumulus/cl_interface_policy.py
34
5248
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = '''
---
module: cl_interface_policy
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure interface enforcement policy on Cumulus Linux
description:
    - This module affects the configuration files located in the interfaces
      folder defined by ifupdown2. Interfaces port and port ranges listed in the
      "allowed" parameter define what interfaces will be available on the
      switch. If the user runs this module and has an interface configured on
      the switch, but not found in the "allowed" list, this interface will be
      unconfigured. By default this is `/etc/network/interface.d`
      For more details go the Configuring Interfaces at
      U(http://docs.cumulusnetworks.com).
notes:
    - lo must be included in the allowed list.
    - eth0 must be in allowed list if out of band management is done
options:
    allowed:
        description:
            - List of ports to run initial run at 10G.
        required: true
    location:
        description:
            - Directory to store interface files.
        default: '/etc/network/interfaces.d/'
'''

EXAMPLES = '''
Example playbook entries using the cl_interface_policy module.

    - name: shows types of interface ranges supported
      cl_interface_policy:
          allowed: "lo eth0 swp1-9, swp11, swp12-13s0, swp12-30s1, swp12-30s2, bond0-12"
'''

RETURN = '''
changed:
    description: whether the interface was changed
    returned: changed
    type: bool
    sample: True

msg:
    description: human-readable report of success or failure
    returned: always
    type: string
    sample: "interface bond0 config updated"
'''


# get list of interface files that are currently "configured".
# doesn't mean actually applied to the system, but most likely are
def read_current_int_dir(module):
    """Populate module.custom_currentportlist with the file names found in
    the configured interfaces directory (one file per interface)."""
    module.custom_currentportlist = os.listdir(module.params.get('location'))


# take the allowed list and convert it to into a list
# of ports.
def convert_allowed_list_to_port_range(module):
    """Expand every range expression in the 'allowed' parameter and collect
    the individual port names into module.custom_allowedportlist."""
    allowedlist = module.params.get('allowed')
    for portrange in allowedlist:
        module.custom_allowedportlist += breakout_portrange(portrange)


def breakout_portrange(prange):
    """Expand a port-range expression into a list of individual port names.

    Examples: 'swp1-3' -> ['swp1', 'swp2', 'swp3'],
              'swp12-30s1' -> ['swp12s1', ..., 'swp30s1'],
              'lo' -> ['lo'] (no range present).
    Regex groups: (prefix)(range start)?-?(range end)?(suffix)?
    NOTE(review): a string that doesn't match the pattern at all would make
    _m0 None and raise AttributeError — assumed prevented by input convention.
    """
    _m0 = re.match(r'(\w+[a-z.])(\d+)?-?(\d+)?(\w+)?', prange.strip())
    # no range defined
    if _m0.group(3) is None:
        return [_m0.group(0)]
    else:
        portarray = []
        intrange = range(int(_m0.group(2)), int(_m0.group(3)) + 1)
        for _int in intrange:
            portarray.append(''.join([_m0.group(1),
                                      str(_int),
                                      str(_m0.group(4) or '')]))
        return portarray


# deletes the interface files
def unconfigure_interfaces(module):
    """Remove the config file of every currently-configured interface that is
    not present in the allowed list."""
    currentportset = set(module.custom_currentportlist)
    allowedportset = set(module.custom_allowedportlist)
    remove_list = currentportset.difference(allowedportset)
    fileprefix = module.params.get('location')
    module.msg = "remove config for interfaces %s" % (', '.join(remove_list))
    for _file in remove_list:
        os.unlink(fileprefix + _file)


# check to see if policy should be enforced
# returns true if policy needs to be enforced
# that is delete interface files
def int_policy_enforce(module):
    """Return True when at least one configured interface is outside the
    allowed set (i.e. files must be deleted)."""
    currentportset = set(module.custom_currentportlist)
    allowedportset = set(module.custom_allowedportlist)
    return not currentportset.issubset(allowedportset)


def main():
    """Module entry point: compare configured vs. allowed interfaces and
    delete the config files of any interface not in the allowed list."""
    module = AnsibleModule(
        argument_spec=dict(
            allowed=dict(type='list', required=True),
            location=dict(type='str', default='/etc/network/interfaces.d/')
        ),
    )
    module.custom_currentportlist = []
    module.custom_allowedportlist = []
    module.changed = False
    module.msg = 'configured port list is part of allowed port list'
    read_current_int_dir(module)
    convert_allowed_list_to_port_range(module)
    if int_policy_enforce(module):
        module.changed = True
        unconfigure_interfaces(module)
    module.exit_json(changed=module.changed, msg=module.msg)

# import module snippets
from ansible.module_utils.basic import *
# from ansible.module_utils.urls import *
import os
# FIX: `re` is used by breakout_portrange() but was never imported explicitly;
# it only worked because the wildcard import above happens to leak `re`.
import re
import shutil  # NOTE(review): appears unused — kept to avoid changing the file's imports

if __name__ == '__main__':
    main()
gpl-3.0
shakamunyi/neutron
neutron/tests/unit/db/test_db_base_plugin_v2.py
3
280256
# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import copy import itertools import mock import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import importutils from sqlalchemy import orm from testtools import matchers import webob.exc import neutron from neutron.api import api_common from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import router from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import test_lib from neutron.common import utils from neutron import context from neutron.db import db_base_plugin_v2 from neutron.db import models_v2 from neutron import manager from neutron.tests import base from neutron.tests import tools from neutron.tests.unit.api import test_extensions from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' DEVICE_OWNER_COMPUTE = 'compute:None' DEVICE_OWNER_NOT_COMPUTE = constants.DEVICE_OWNER_DHCP def optional_ctx(obj, fallback): if not obj: return fallback() @contextlib.contextmanager def context_wrapper(): yield obj return context_wrapper() def _fake_get_pagination_helper(self, request): return api_common.PaginationEmulatedHelper(request, self._primary_key) def _fake_get_sorting_helper(self, request): return 
api_common.SortingEmulatedHelper(request, self._attr_info) # TODO(banix): Move the following method to ML2 db test module when ML2 # mechanism driver unit tests are corrected to use Ml2PluginV2TestCase # instead of directly using NeutronDbPluginV2TestCase def _get_create_db_method(resource): ml2_method = '_create_%s_db' % resource if hasattr(manager.NeutronManager.get_plugin(), ml2_method): return ml2_method else: return 'create_%s' % resource class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): fmt = 'json' resource_prefix_map = {} def setUp(self, plugin=None, service_plugins=None, ext_mgr=None): super(NeutronDbPluginV2TestCase, self).setUp() cfg.CONF.set_override('notify_nova_on_port_status_changes', False) cfg.CONF.set_override('allow_overlapping_ips', True) # Make sure at each test according extensions for the plugin is loaded extensions.PluginAwareExtensionManager._instance = None # Save the attributes map in case the plugin will alter it # loading extensions self.useFixture(tools.AttributeMapMemento()) self._tenant_id = 'test-tenant' if not plugin: plugin = DB_PLUGIN_KLASS # Update the plugin self.setup_coreplugin(plugin) cfg.CONF.set_override( 'service_plugins', [test_lib.test_config.get(key, default) for key, default in (service_plugins or {}).iteritems()] ) cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab") cfg.CONF.set_override('max_dns_nameservers', 2) cfg.CONF.set_override('max_subnet_host_routes', 2) cfg.CONF.set_override('allow_pagination', True) cfg.CONF.set_override('allow_sorting', True) self.api = router.APIRouter() # Set the defualt status self.net_create_status = 'ACTIVE' self.port_create_status = 'ACTIVE' def _is_native_bulk_supported(): plugin_obj = manager.NeutronManager.get_plugin() native_bulk_attr_name = ("_%s__native_bulk_support" % plugin_obj.__class__.__name__) return getattr(plugin_obj, native_bulk_attr_name, False) self._skip_native_bulk = not _is_native_bulk_supported() def _is_native_pagination_support(): 
native_pagination_attr_name = ( "_%s__native_pagination_support" % manager.NeutronManager.get_plugin().__class__.__name__) return (cfg.CONF.allow_pagination and getattr(manager.NeutronManager.get_plugin(), native_pagination_attr_name, False)) self._skip_native_pagination = not _is_native_pagination_support() def _is_native_sorting_support(): native_sorting_attr_name = ( "_%s__native_sorting_support" % manager.NeutronManager.get_plugin().__class__.__name__) return (cfg.CONF.allow_sorting and getattr(manager.NeutronManager.get_plugin(), native_sorting_attr_name, False)) self.plugin = manager.NeutronManager.get_plugin() self._skip_native_sorting = not _is_native_sorting_support() if ext_mgr: self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) def tearDown(self): self.api = None self._deserializers = None self._skip_native_bulk = None self._skip_native_pagination = None self._skip_native_sortin = None self.ext_api = None super(NeutronDbPluginV2TestCase, self).tearDown() def setup_config(self): # Create the default configurations args = ['--config-file', base.etcdir('neutron.conf.test')] # If test_config specifies some config-file, use it, as well for config_file in test_lib.test_config.get('config_files', []): args.extend(['--config-file', config_file]) super(NeutronDbPluginV2TestCase, self).setup_config(args=args) def _req(self, method, resource, data=None, fmt=None, id=None, params=None, action=None, subresource=None, sub_id=None, context=None): fmt = fmt or self.fmt path = '/%s.%s' % ( '/'.join(p for p in (resource, id, subresource, sub_id, action) if p), fmt ) prefix = self.resource_prefix_map.get(resource) if prefix: path = prefix + path content_type = 'application/%s' % fmt body = None if data is not None: # empty dict is valid body = self.serialize(data) return testlib_api.create_request(path, body, content_type, method, query_string=params, context=context) def new_create_request(self, resource, data, fmt=None, id=None, subresource=None, 
context=None): return self._req('POST', resource, data, fmt, id=id, subresource=subresource, context=context) def new_list_request(self, resource, fmt=None, params=None, subresource=None): return self._req( 'GET', resource, None, fmt, params=params, subresource=subresource ) def new_show_request(self, resource, id, fmt=None, subresource=None, fields=None): if fields: params = "&".join(["fields=%s" % x for x in fields]) else: params = None return self._req('GET', resource, None, fmt, id=id, params=params, subresource=subresource) def new_delete_request(self, resource, id, fmt=None, subresource=None, sub_id=None): return self._req( 'DELETE', resource, None, fmt, id=id, subresource=subresource, sub_id=sub_id ) def new_update_request(self, resource, data, id, fmt=None, subresource=None, context=None): return self._req( 'PUT', resource, data, fmt, id=id, subresource=subresource, context=context ) def new_action_request(self, resource, data, id, action, fmt=None, subresource=None): return self._req( 'PUT', resource, data, fmt, id=id, action=action, subresource=subresource ) def deserialize(self, content_type, response): ctype = 'application/%s' % content_type data = self._deserializers[ctype].deserialize(response.body)['body'] return data def _create_bulk_from_list(self, fmt, resource, objects, **kwargs): """Creates a bulk request from a list of objects.""" collection = "%ss" % resource req_data = {collection: objects} req = self.new_create_request(collection, req_data, fmt) if ('set_context' in kwargs and kwargs['set_context'] is True and 'tenant_id' in kwargs): # create a specific auth context for this request req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) elif 'context' in kwargs: req.environ['neutron.context'] = kwargs['context'] return req.get_response(self.api) def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs): """Creates a bulk request for any kind of resource.""" objects = [] collection = "%ss" % resource for i 
in range(number): obj = copy.deepcopy(data) obj[resource]['name'] = "%s_%s" % (name, i) if 'override' in kwargs and i in kwargs['override']: obj[resource].update(kwargs['override'][i]) objects.append(obj) req_data = {collection: objects} req = self.new_create_request(collection, req_data, fmt) if ('set_context' in kwargs and kwargs['set_context'] is True and 'tenant_id' in kwargs): # create a specific auth context for this request req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) elif 'context' in kwargs: req.environ['neutron.context'] = kwargs['context'] return req.get_response(self.api) def _create_network(self, fmt, name, admin_state_up, arg_list=None, **kwargs): data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} for arg in (('admin_state_up', 'tenant_id', 'shared', 'vlan_transparent') + (arg_list or ())): # Arg must be present if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if (kwargs.get('set_context') and 'tenant_id' in kwargs): # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) return network_req.get_response(self.api) def _create_network_bulk(self, fmt, number, name, admin_state_up, **kwargs): base_data = {'network': {'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} return self._create_bulk(fmt, number, 'network', base_data, **kwargs) def _create_subnet(self, fmt, net_id, cidr, expected_res_status=None, **kwargs): data = {'subnet': {'network_id': net_id, 'cidr': cidr, 'ip_version': 4, 'tenant_id': self._tenant_id}} for arg in ('ip_version', 'tenant_id', 'enable_dhcp', 'allocation_pools', 'dns_nameservers', 'host_routes', 'shared', 'ipv6_ra_mode', 'ipv6_address_mode'): # Arg must be present and not null (but can be false) if kwargs.get(arg) is not None: data['subnet'][arg] = kwargs[arg] if ('gateway_ip' in kwargs and 
kwargs['gateway_ip'] is not attributes.ATTR_NOT_SPECIFIED): data['subnet']['gateway_ip'] = kwargs['gateway_ip'] subnet_req = self.new_create_request('subnets', data, fmt) if (kwargs.get('set_context') and 'tenant_id' in kwargs): # create a specific auth context for this request subnet_req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) subnet_res = subnet_req.get_response(self.api) if expected_res_status: self.assertEqual(subnet_res.status_int, expected_res_status) return subnet_res def _create_subnet_bulk(self, fmt, number, net_id, name, ip_version=4, **kwargs): base_data = {'subnet': {'network_id': net_id, 'ip_version': ip_version, 'tenant_id': self._tenant_id}} # auto-generate cidrs as they should not overlap overrides = dict((k, v) for (k, v) in zip(range(number), [{'cidr': "10.0.%s.0/24" % num} for num in range(number)])) kwargs.update({'override': overrides}) return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs) def _create_subnetpool(self, fmt, prefixes, expected_res_status=None, admin=False, **kwargs): subnetpool = {'subnetpool': {'prefixes': prefixes}} for k, v in kwargs.items(): subnetpool['subnetpool'][k] = str(v) api = self._api_for_resource('subnetpools') subnetpools_req = self.new_create_request('subnetpools', subnetpool, fmt) if not admin: neutron_context = context.Context('', kwargs['tenant_id']) subnetpools_req.environ['neutron.context'] = neutron_context subnetpool_res = subnetpools_req.get_response(api) if expected_res_status: self.assertEqual(subnetpool_res.status_int, expected_res_status) return subnetpool_res def _create_port(self, fmt, net_id, expected_res_status=None, arg_list=None, **kwargs): data = {'port': {'network_id': net_id, 'tenant_id': self._tenant_id}} for arg in (('admin_state_up', 'device_id', 'mac_address', 'name', 'fixed_ips', 'tenant_id', 'device_owner', 'security_groups') + (arg_list or ())): # Arg must be present if arg in kwargs: data['port'][arg] = kwargs[arg] # create a dhcp port 
device id if one hasn't been supplied if ('device_owner' in kwargs and kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and 'host' in kwargs and 'device_id' not in kwargs): device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host']) data['port']['device_id'] = device_id port_req = self.new_create_request('ports', data, fmt) if (kwargs.get('set_context') and 'tenant_id' in kwargs): # create a specific auth context for this request port_req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) port_res = port_req.get_response(self.api) if expected_res_status: self.assertEqual(port_res.status_int, expected_res_status) return port_res def _list_ports(self, fmt, expected_res_status=None, net_id=None, **kwargs): query_params = [] if net_id: query_params.append("network_id=%s" % net_id) if kwargs.get('device_owner'): query_params.append("device_owner=%s" % kwargs.get('device_owner')) port_req = self.new_list_request('ports', fmt, '&'.join(query_params)) if ('set_context' in kwargs and kwargs['set_context'] is True and 'tenant_id' in kwargs): # create a specific auth context for this request port_req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) port_res = port_req.get_response(self.api) if expected_res_status: self.assertEqual(port_res.status_int, expected_res_status) return port_res def _create_port_bulk(self, fmt, number, net_id, name, admin_state_up, **kwargs): base_data = {'port': {'network_id': net_id, 'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} return self._create_bulk(fmt, number, 'port', base_data, **kwargs) def _make_network(self, fmt, name, admin_state_up, **kwargs): res = self._create_network(fmt, name, admin_state_up, **kwargs) # TODO(salvatore-orlando): do exception handling in this test module # in a uniform way (we do it differently for ports, subnets, and nets # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= 
webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _make_subnet(self, fmt, network, gateway, cidr, allocation_pools=None, ip_version=4, enable_dhcp=True, dns_nameservers=None, host_routes=None, shared=None, ipv6_ra_mode=None, ipv6_address_mode=None): res = self._create_subnet(fmt, net_id=network['network']['id'], cidr=cidr, gateway_ip=gateway, tenant_id=network['network']['tenant_id'], allocation_pools=allocation_pools, ip_version=ip_version, enable_dhcp=enable_dhcp, dns_nameservers=dns_nameservers, host_routes=host_routes, shared=shared, ipv6_ra_mode=ipv6_ra_mode, ipv6_address_mode=ipv6_address_mode) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs): res = self._create_subnetpool(fmt, prefixes, None, admin, **kwargs) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs): res = self._create_port(fmt, net_id, expected_res_status, **kwargs) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _api_for_resource(self, resource): if resource in ['networks', 'subnets', 'ports', 'subnetpools']: return self.api else: return self.ext_api def _delete(self, collection, id, expected_code=webob.exc.HTTPNoContent.code, neutron_context=None): req = self.new_delete_request(collection, id) if neutron_context: # create a specific auth 
context for this request req.environ['neutron.context'] = neutron_context res = req.get_response(self._api_for_resource(collection)) self.assertEqual(res.status_int, expected_code) def _show_response(self, resource, id, neutron_context=None): req = self.new_show_request(resource, id) if neutron_context: # create a specific auth context for this request req.environ['neutron.context'] = neutron_context return req.get_response(self._api_for_resource(resource)) def _show(self, resource, id, expected_code=webob.exc.HTTPOk.code, neutron_context=None): res = self._show_response(resource, id, neutron_context=neutron_context) self.assertEqual(expected_code, res.status_int) return self.deserialize(self.fmt, res) def _update(self, resource, id, new_data, expected_code=webob.exc.HTTPOk.code, neutron_context=None): req = self.new_update_request(resource, new_data, id) if neutron_context: # create a specific auth context for this request req.environ['neutron.context'] = neutron_context res = req.get_response(self._api_for_resource(resource)) self.assertEqual(res.status_int, expected_code) return self.deserialize(self.fmt, res) def _list(self, resource, fmt=None, neutron_context=None, query_params=None): fmt = fmt or self.fmt req = self.new_list_request(resource, fmt, query_params) if neutron_context: req.environ['neutron.context'] = neutron_context res = req.get_response(self._api_for_resource(resource)) self.assertEqual(res.status_int, webob.exc.HTTPOk.code) return self.deserialize(fmt, res) def _fail_second_call(self, patched_plugin, orig, *args, **kwargs): """Invoked by test cases for injecting failures in plugin.""" def second_call(*args, **kwargs): raise n_exc.NeutronException() patched_plugin.side_effect = second_call return orig(*args, **kwargs) def _validate_behavior_on_bulk_failure( self, res, collection, errcode=webob.exc.HTTPClientError.code): self.assertEqual(res.status_int, errcode) req = self.new_list_request(collection) res = req.get_response(self.api) 
self.assertEqual(res.status_int, webob.exc.HTTPOk.code) items = self.deserialize(self.fmt, res) self.assertEqual(len(items[collection]), 0) def _validate_behavior_on_bulk_success(self, res, collection, names=['test_0', 'test_1']): self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) items = self.deserialize(self.fmt, res)[collection] self.assertEqual(len(items), 2) self.assertEqual(items[0]['name'], 'test_0') self.assertEqual(items[1]['name'], 'test_1') def _test_list_resources(self, resource, items, neutron_context=None, query_params=None): res = self._list('%ss' % resource, neutron_context=neutron_context, query_params=query_params) resource = resource.replace('-', '_') self.assertItemsEqual([i['id'] for i in res['%ss' % resource]], [i[resource]['id'] for i in items]) @contextlib.contextmanager def network(self, name='net1', admin_state_up=True, fmt=None, **kwargs): network = self._make_network(fmt or self.fmt, name, admin_state_up, **kwargs) yield network @contextlib.contextmanager def subnet(self, network=None, gateway_ip=attributes.ATTR_NOT_SPECIFIED, cidr='10.0.0.0/24', fmt=None, ip_version=4, allocation_pools=None, enable_dhcp=True, dns_nameservers=None, host_routes=None, shared=None, ipv6_ra_mode=None, ipv6_address_mode=None): with optional_ctx(network, self.network) as network_to_use: subnet = self._make_subnet(fmt or self.fmt, network_to_use, gateway_ip, cidr, allocation_pools, ip_version, enable_dhcp, dns_nameservers, host_routes, shared=shared, ipv6_ra_mode=ipv6_ra_mode, ipv6_address_mode=ipv6_address_mode) yield subnet @contextlib.contextmanager def subnetpool(self, prefixes, admin=False, **kwargs): subnetpool = self._make_subnetpool(self.fmt, prefixes, admin, **kwargs) yield subnetpool @contextlib.contextmanager def port(self, subnet=None, fmt=None, **kwargs): with optional_ctx(subnet, self.subnet) as subnet_to_use: net_id = subnet_to_use['subnet']['network_id'] port = self._make_port(fmt or self.fmt, net_id, **kwargs) yield port def 
_test_list_with_sort(self, resource, items, sorts, resources=None, query_params=''): query_str = query_params for key, direction in sorts: query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key, direction) if not resources: resources = '%ss' % resource req = self.new_list_request(resources, params=query_str) api = self._api_for_resource(resources) res = self.deserialize(self.fmt, req.get_response(api)) resource = resource.replace('-', '_') resources = resources.replace('-', '_') expected_res = [item[resource]['id'] for item in items] self.assertEqual(expected_res, [n['id'] for n in res[resources]]) def _test_list_with_pagination(self, resource, items, sort, limit, expected_page_num, resources=None, query_params='', verify_key='id'): if not resources: resources = '%ss' % resource query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&sort_key=%s&" "sort_dir=%s") % (limit, sort[0], sort[1]) req = self.new_list_request(resources, params=query_str) items_res = [] page_num = 0 api = self._api_for_resource(resources) resource = resource.replace('-', '_') resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = self.deserialize(self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) items_res = items_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'next': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', content_type) self.assertEqual(len(res[resources]), limit) self.assertEqual(expected_page_num, page_num) self.assertEqual([item[resource][verify_key] for item in items], [n[verify_key] for n in items_res]) def _test_list_with_pagination_reverse(self, resource, items, sort, limit, expected_page_num, resources=None, query_params=''): if not resources: resources = '%ss' % resource resource = resource.replace('-', '_') api = 
self._api_for_resource(resources) marker = items[-1][resource]['id'] query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&page_reverse=True&" "sort_key=%s&sort_dir=%s&" "marker=%s") % (limit, sort[0], sort[1], marker) req = self.new_list_request(resources, params=query_str) item_res = [items[-1][resource]] page_num = 0 resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = self.deserialize(self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) res[resources].reverse() item_res = item_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'previous': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', content_type) self.assertEqual(len(res[resources]), limit) self.assertEqual(expected_page_num, page_num) expected_res = [item[resource]['id'] for item in items] expected_res.reverse() self.assertEqual(expected_res, [n['id'] for n in item_res]) def _compare_resource(self, observed_res, expected_res, res_name): ''' Compare the observed and expected resources (ie compare subnets) ''' for k in expected_res: self.assertIn(k, observed_res[res_name]) if isinstance(expected_res[k], list): self.assertEqual(sorted(observed_res[res_name][k]), sorted(expected_res[k])) else: self.assertEqual(observed_res[res_name][k], expected_res[k]) def _validate_resource(self, resource, keys, res_name): for k in keys: self.assertIn(k, resource[res_name]) if isinstance(keys[k], list): self.assertEqual(sorted(resource[res_name][k]), sorted(keys[k])) else: self.assertEqual(resource[res_name][k], keys[k]) class TestBasicGet(NeutronDbPluginV2TestCase): def test_single_get_admin(self): plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2() with self.network() as network: net_id = network['network']['id'] ctx = context.get_admin_context() n = plugin._get_network(ctx, net_id) 
self.assertEqual(net_id, n.id) def test_single_get_tenant(self): plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2() with self.network() as network: net_id = network['network']['id'] ctx = context.get_admin_context() n = plugin._get_network(ctx, net_id) self.assertEqual(net_id, n.id) class TestV2HTTPResponse(NeutronDbPluginV2TestCase): def test_create_returns_201(self): res = self._create_network(self.fmt, 'net2', True) self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) def test_list_returns_200(self): req = self.new_list_request('networks') res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPOk.code) def _check_list_with_fields(self, res, field_name): self.assertEqual(res.status_int, webob.exc.HTTPOk.code) body = self.deserialize(self.fmt, res) # further checks: 1 networks self.assertEqual(len(body['networks']), 1) # 1 field in the network record self.assertEqual(len(body['networks'][0]), 1) # field is 'name' self.assertIn(field_name, body['networks'][0]) def test_list_with_fields(self): self._create_network(self.fmt, 'some_net', True) req = self.new_list_request('networks', params="fields=name") res = req.get_response(self.api) self._check_list_with_fields(res, 'name') def test_list_with_fields_noadmin(self): tenant_id = 'some_tenant' self._create_network(self.fmt, 'some_net', True, tenant_id=tenant_id, set_context=True) req = self.new_list_request('networks', params="fields=name") req.environ['neutron.context'] = context.Context('', tenant_id) res = req.get_response(self.api) self._check_list_with_fields(res, 'name') def test_list_with_fields_noadmin_and_policy_field(self): """If a field used by policy is selected, do not duplicate it. Verifies that if the field parameter explicitly specifies a field which is used by the policy engine, then it is not duplicated in the response. 
""" tenant_id = 'some_tenant' self._create_network(self.fmt, 'some_net', True, tenant_id=tenant_id, set_context=True) req = self.new_list_request('networks', params="fields=tenant_id") req.environ['neutron.context'] = context.Context('', tenant_id) res = req.get_response(self.api) self._check_list_with_fields(res, 'tenant_id') def test_show_returns_200(self): with self.network() as net: req = self.new_show_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPOk.code) def test_delete_returns_204(self): res = self._create_network(self.fmt, 'net1', True) net = self.deserialize(self.fmt, res) req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_update_returns_200(self): with self.network() as net: req = self.new_update_request('networks', {'network': {'name': 'steve'}}, net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPOk.code) def test_update_invalid_json_400(self): with self.network() as net: req = self.new_update_request('networks', '{{"name": "aaa"}}', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_bad_route_404(self): req = self.new_list_request('doohickeys') res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) class TestPortsV2(NeutronDbPluginV2TestCase): def test_create_port_json(self): keys = [('admin_state_up', True), ('status', self.port_create_status)] with self.port(name='myname') as port: for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.2') self.assertEqual('myname', port['port']['name']) def test_create_port_as_admin(self): with self.network() as 
network: self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='bad_tenant_id', device_id='fake_device', device_owner='fake_owner', fixed_ips=[], set_context=False) def test_create_port_bad_tenant(self): with self.network() as network: self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPNotFound.code, tenant_id='bad_tenant_id', device_id='fake_device', device_owner='fake_owner', fixed_ips=[], set_context=True) def test_create_port_public_network(self): keys = [('admin_state_up', True), ('status', self.port_create_status)] with self.network(shared=True) as network: port_res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='another_tenant', set_context=True) port = self.deserialize(self.fmt, port_res) for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) self._delete('ports', port['port']['id']) def test_create_port_public_network_with_ip(self): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: keys = [('admin_state_up', True), ('status', self.port_create_status), ('fixed_ips', [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}])] port_res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='another_tenant', set_context=True) port = self.deserialize(self.fmt, port_res) for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) self._delete('ports', port['port']['id']) def test_create_port_public_network_with_invalid_ip_no_subnet_id(self, expected_error='InvalidIpForNetwork'): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24'): ips = [{'ip_address': '1.1.1.1'}] res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPBadRequest.code, fixed_ips=ips, set_context=True) data = self.deserialize(self.fmt, res) msg = 
str(n_exc.InvalidIpForNetwork(ip_address='1.1.1.1')) self.assertEqual(expected_error, data['NeutronError']['type']) self.assertEqual(msg, data['NeutronError']['message']) def test_create_port_public_network_with_invalid_ip_and_subnet_id(self, expected_error='InvalidIpForSubnet'): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '1.1.1.1'}] res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPBadRequest.code, fixed_ips=ips, set_context=True) data = self.deserialize(self.fmt, res) msg = str(n_exc.InvalidIpForSubnet(ip_address='1.1.1.1')) self.assertEqual(expected_error, data['NeutronError']['type']) self.assertEqual(msg, data['NeutronError']['message']) def test_create_ports_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk port create") with self.network() as net: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True) self._validate_behavior_on_bulk_success(res, 'ports') for p in self.deserialize(self.fmt, res)['ports']: self._delete('ports', p['id']) def test_create_ports_bulk_emulated(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('__builtin__.hasattr', new=fakehasattr): with self.network() as net: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True) self._validate_behavior_on_bulk_success(res, 'ports') for p in self.deserialize(self.fmt, res)['ports']: self._delete('ports', p['id']) def test_create_ports_bulk_wrong_input(self): with self.network() as net: overrides = {1: {'admin_state_up': 'doh'}} res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True, override=overrides) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) req = 
self.new_list_request('ports') res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPOk.code) ports = self.deserialize(self.fmt, res) self.assertEqual(len(ports['ports']), 0) def test_create_ports_bulk_emulated_plugin_failure(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('__builtin__.hasattr', new=fakehasattr): orig = manager.NeutronManager.get_plugin().create_port method_to_patch = _get_create_db_method('port') with mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'ports', webob.exc.HTTPServerError.code ) def test_create_ports_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk port create") ctx = context.get_admin_context() with self.network() as net: plugin = manager.NeutronManager.get_plugin() orig = plugin.create_port method_to_patch = _get_create_db_method('port') with mock.patch.object(plugin, method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True, context=ctx) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'ports', webob.exc.HTTPServerError.code) def test_list_ports(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) 
with contextlib.nested(self.port(), self.port(), self.port()) as ports: self._test_list_resources('port', ports) def test_list_ports_filtered_by_fixed_ip(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) with contextlib.nested(self.port(), self.port()) as (port1, port2): fixed_ips = port1['port']['fixed_ips'][0] query_params = """ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s """.strip() % (fixed_ips['ip_address'], '192.168.126.5', fixed_ips['subnet_id']) self._test_list_resources('port', [port1], query_params=query_params) def test_list_ports_public_network(self): with self.network(shared=True) as network: with self.subnet(network) as subnet: with contextlib.nested(self.port(subnet, tenant_id='tenant_1'), self.port(subnet, tenant_id='tenant_2') ) as (port1, port2): # Admin request - must return both ports self._test_list_resources('port', [port1, port2]) # Tenant_1 request - must return single port q_context = context.Context('', 'tenant_1') self._test_list_resources('port', [port1], neutron_context=q_context) # Tenant_2 request - must return single port q_context = context.Context('', 'tenant_2') self._test_list_resources('port', [port2], neutron_context=q_context) def test_list_ports_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") cfg.CONF.set_default('allow_overlapping_ips', True) with contextlib.nested(self.port(admin_state_up='True', mac_address='00:00:00:00:00:01'), self.port(admin_state_up='False', mac_address='00:00:00:00:00:02'), self.port(admin_state_up='False', mac_address='00:00:00:00:00:03') ) as (port1, port2, port3): self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), ('mac_address', 'desc')]) def test_list_ports_with_sort_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_sorting_helper', new=_fake_get_sorting_helper) 
helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with contextlib.nested(self.port(admin_state_up='True', mac_address='00:00:00:00:00:01'), self.port(admin_state_up='False', mac_address='00:00:00:00:00:02'), self.port(admin_state_up='False', mac_address='00:00:00:00:00:03') ) as (port1, port2, port3): self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), ('mac_address', 'desc')]) def test_list_ports_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") cfg.CONF.set_default('allow_overlapping_ips', True) with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), self.port(mac_address='00:00:00:00:00:02'), self.port(mac_address='00:00:00:00:00:03') ) as (port1, port2, port3): self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), self.port(mac_address='00:00:00:00:00:02'), self.port(mac_address='00:00:00:00:00:03') ) as (port1, port2, port3): self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_pagination_reverse_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") cfg.CONF.set_default('allow_overlapping_ips', True) with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), self.port(mac_address='00:00:00:00:00:02'), self.port(mac_address='00:00:00:00:00:03') ) as (port1, port2, port3): self._test_list_with_pagination_reverse('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_pagination_reverse_emulated(self): 
helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), self.port(mac_address='00:00:00:00:00:02'), self.port(mac_address='00:00:00:00:00:03') ) as (port1, port2, port3): self._test_list_with_pagination_reverse('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_show_port(self): with self.port() as port: req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port']['id'], sport['port']['id']) def test_delete_port(self): with self.port() as port: self._delete('ports', port['port']['id']) self._show('ports', port['port']['id'], expected_code=webob.exc.HTTPNotFound.code) def test_delete_port_public_network(self): with self.network(shared=True) as network: port_res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='another_tenant', set_context=True) port = self.deserialize(self.fmt, port_res) self._delete('ports', port['port']['id']) self._show('ports', port['port']['id'], expected_code=webob.exc.HTTPNotFound.code) def test_update_port(self): with self.port() as port: data = {'port': {'admin_state_up': False}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) def update_port_mac(self, port, updated_fixed_ips=None): orig_mac = port['mac_address'] mac = orig_mac.split(':') mac[5] = '01' if mac[5] != '01' else '00' new_mac = ':'.join(mac) data = {'port': {'mac_address': new_mac}} if updated_fixed_ips: data['port']['fixed_ips'] = updated_fixed_ips req = self.new_update_request('ports', data, port['id']) return req.get_response(self.api), new_mac def 
_check_v6_auto_address_address(self, port, subnet): if ipv6_utils.is_auto_address_subnet(subnet['subnet']): port_mac = port['port']['mac_address'] subnet_cidr = subnet['subnet']['cidr'] eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], eui_addr) def check_update_port_mac( self, expected_status=webob.exc.HTTPOk.code, expected_error='StateInvalid', subnet=None, device_owner=DEVICE_OWNER_COMPUTE, updated_fixed_ips=None, host_arg={}, arg_list=[]): with self.port(device_owner=device_owner, subnet=subnet, arg_list=arg_list, **host_arg) as port: self.assertIn('mac_address', port['port']) res, new_mac = self.update_port_mac( port['port'], updated_fixed_ips=updated_fixed_ips) self.assertEqual(expected_status, res.status_int) if expected_status == webob.exc.HTTPOk.code: result = self.deserialize(self.fmt, res) self.assertIn('port', result) self.assertEqual(new_mac, result['port']['mac_address']) if subnet and subnet['subnet']['ip_version'] == 6: self._check_v6_auto_address_address(port, subnet) else: error = self.deserialize(self.fmt, res) self.assertEqual(expected_error, error['NeutronError']['type']) def test_update_port_mac(self): self.check_update_port_mac() # sub-classes for plugins/drivers that support mac address update # override this method def test_update_port_mac_ip(self): with self.subnet() as subnet: updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}] self.check_update_port_mac(subnet=subnet, updated_fixed_ips=updated_fixed_ips) def test_update_port_mac_v6_slaac(self): with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: self.assertTrue( ipv6_utils.is_auto_address_subnet(subnet['subnet'])) self.check_update_port_mac(subnet=subnet) def test_update_port_mac_bad_owner(self): self.check_update_port_mac( device_owner=DEVICE_OWNER_NOT_COMPUTE, 
expected_status=webob.exc.HTTPConflict.code, expected_error='UnsupportedPortDeviceOwner') def check_update_port_mac_used(self, expected_error='MacAddressInUse'): with self.subnet() as subnet: with self.port(subnet=subnet) as port: with self.port(subnet=subnet) as port2: self.assertIn('mac_address', port['port']) new_mac = port2['port']['mac_address'] data = {'port': {'mac_address': new_mac}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual(expected_error, error['NeutronError']['type']) def test_update_port_mac_used(self): self.check_update_port_mac_used() def test_update_port_not_admin(self): res = self._create_network(self.fmt, 'net1', True, tenant_id='not_admin', set_context=True) net1 = self.deserialize(self.fmt, res) res = self._create_port(self.fmt, net1['network']['id'], tenant_id='not_admin', set_context=True) port = self.deserialize(self.fmt, res) data = {'port': {'admin_state_up': False}} neutron_context = context.Context('', 'not_admin') port = self._update('ports', port['port']['id'], data, neutron_context=neutron_context) self.assertEqual(port['port']['admin_state_up'], False) def test_update_device_id_unchanged(self): with self.port() as port: data = {'port': {'admin_state_up': True, 'device_id': port['port']['device_id']}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], True) def test_update_device_id_null(self): with self.port() as port: data = {'port': {'device_id': None}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_delete_network_if_port_exists(self): with self.port() as port: req = self.new_delete_request('networks', 
port['port']['network_id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) def test_delete_network_port_exists_owned_by_network(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] self._create_port(self.fmt, network_id, device_owner=constants.DEVICE_OWNER_DHCP) req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_update_port_delete_ip(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) self.assertEqual(res['port']['fixed_ips'], data['port']['fixed_ips']) def test_no_more_port_exception(self): with self.subnet(cidr='10.0.0.0/32', enable_dhcp=False) as subnet: id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, id) data = self.deserialize(self.fmt, res) msg = str(n_exc.IpAddressGenerationFailure(net_id=id)) self.assertEqual(data['NeutronError']['message'], msg) self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) def test_update_port_update_ip(self): """Test update of port IP. Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10. 
""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.2') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.10') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_update_ip_address_only(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.2') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}, {'ip_address': "10.0.0.2"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(len(ips), 2) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '10.0.0.10', 'subnet_id': subnet['subnet']['id']}, ips) def test_update_port_update_ips(self): """Update IP and associate new IP on port. Check a port update with the specified subnet_id's. A IP address will be allocated for each subnet_id. 
""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) ips = res['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.3') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_add_additional_ip(self): """Test update of port with additional IP.""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) ips = res['port']['fixed_ips'] self.assertEqual(len(ips), 2) self.assertIn({'ip_address': '10.0.0.3', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '10.0.0.4', 'subnet_id': subnet['subnet']['id']}, ips) def test_update_port_invalid_fixed_ip_address_v6_slaac(self): with self.subnet( cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC, gateway_ip=attributes.ATTR_NOT_SPECIFIED) as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 1) port_mac = port['port']['mac_address'] subnet_cidr = subnet['subnet']['cidr'] eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) self.assertEqual(ips[0]['ip_address'], eui_addr) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '2607:f0d0:1002:51::5'}]}} 
req = self.new_update_request('ports', data, port['port']['id'])
                res = req.get_response(self.api)
                err = self.deserialize(self.fmt, res)
                # Requesting a specific fixed IP on a SLAAC subnet is
                # rejected: SLAAC addresses are derived from the port MAC,
                # not chosen by the user.
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPClientError.code)
                self.assertEqual(err['NeutronError']['type'],
                                 'InvalidInput')

    def test_requested_duplicate_mac(self):
        # Creating a second port with an already-allocated MAC address
        # on the same network must fail with 409 Conflict.
        with self.port() as port:
            mac = port['port']['mac_address']
            # check that MAC address matches base MAC
            base_mac = cfg.CONF.base_mac[0:2]
            self.assertTrue(mac.startswith(base_mac))
            kwargs = {"mac_address": mac}
            net_id = port['port']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)

    def test_mac_generation(self):
        # Generated MACs must start with the configured 3-octet base_mac.
        cfg.CONF.set_override('base_mac', "12:34:56:00:00:00")
        with self.port() as port:
            mac = port['port']['mac_address']
            self.assertTrue(mac.startswith("12:34:56"))

    def test_mac_generation_4octet(self):
        # A 4-octet base_mac narrows the generated range accordingly.
        cfg.CONF.set_override('base_mac', "12:34:56:78:00:00")
        with self.port() as port:
            mac = port['port']['mac_address']
            self.assertTrue(mac.startswith("12:34:56:78"))

    def test_bad_mac_format(self):
        # An unparseable base_mac must be rejected by the plugin's
        # format check; reaching self.fail() means no exception was raised.
        cfg.CONF.set_override('base_mac', "bad_mac")
        try:
            self.plugin._check_base_mac_format()
        except Exception:
            return
        self.fail("No exception for illegal base_mac format")

    def test_mac_exhaustion(self):
        # rather than actually consuming all MAC (would take a LONG time)
        # we try to allocate an already allocated mac address
        cfg.CONF.set_override('mac_generation_retries', 3)
        res = self._create_network(fmt=self.fmt, name='net1',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        net_id = network['network']['id']
        error = n_exc.MacAddressInUse(net_id=net_id, mac='00:11:22:33:44:55')
        with mock.patch.object(
                neutron.db.db_base_plugin_v2.NeutronDbPluginV2,
                '_create_port_with_mac', side_effect=error) as create_mock:
            res = self._create_port(self.fmt, net_id=net_id)
            # Every retry hit the (mocked) duplicate, so allocation gives
            # up with 503 after exactly 'mac_generation_retries' attempts.
            self.assertEqual(res.status_int,
                             webob.exc.HTTPServiceUnavailable.code)
            self.assertEqual(3, create_mock.call_count)

    def 
test_requested_duplicate_ip(self):
        # Requesting a fixed IP that is already allocated to another port
        # on the same subnet must fail with 409 Conflict.
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Check configuring of duplicate IP
                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                         'ip_address': ips[0]['ip_address']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)

    def test_requested_subnet_id(self):
        # A port created with only a subnet_id (no address) gets the next
        # free address allocated from that subnet (.2 taken -> .3).
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Request a IP from specific subnet
                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                port2 = self.deserialize(self.fmt, res)
                ips = port2['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                self._delete('ports', port2['port']['id'])

    def test_requested_subnet_id_not_on_network(self):
        # Asking for an address from a subnet belonging to a different
        # network than the port's must be rejected as a client error.
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                # Create new network
                res = self._create_network(fmt=self.fmt, name='net2',
                                           admin_state_up=True)
                network2 = self.deserialize(self.fmt, res)
                subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1",
                                            "1.1.1.0/24", ip_version=4)
                net_id = port['port']['network_id']
                # Request a IP from specific subnet
                kwargs = {"fixed_ips": [{'subnet_id':
                                         subnet2['subnet']['id']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPClientError.code)

    def test_overlapping_subnets(self):
        with 
self.subnet() as subnet: tenant_id = subnet['subnet']['tenant_id'] net_id = subnet['subnet']['network_id'] res = self._create_subnet(self.fmt, tenant_id=tenant_id, net_id=net_id, cidr='10.0.0.225/28', ip_version=4, gateway_ip=attributes.ATTR_NOT_SPECIFIED) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_requested_subnet_id_v4_and_v6(self): with self.subnet() as subnet: # Get a IPv4 and IPv6 address tenant_id = subnet['subnet']['tenant_id'] net_id = subnet['subnet']['network_id'] res = self._create_subnet( self.fmt, tenant_id=tenant_id, net_id=net_id, cidr='2607:f0d0:1002:51::/124', ip_version=6, gateway_ip=attributes.ATTR_NOT_SPECIFIED) subnet2 = self.deserialize(self.fmt, res) kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) port3 = self.deserialize(self.fmt, res) ips = port3['port']['fixed_ips'] self.assertEqual(len(ips), 2) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '2607:f0d0:1002:51::2', 'subnet_id': subnet2['subnet']['id']}, ips) res = self._create_port(self.fmt, net_id=net_id) port4 = self.deserialize(self.fmt, res) # Check that a v4 and a v6 address are allocated ips = port4['port']['fixed_ips'] self.assertEqual(len(ips), 2) self.assertIn({'ip_address': '10.0.0.3', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '2607:f0d0:1002:51::3', 'subnet_id': subnet2['subnet']['id']}, ips) self._delete('ports', port3['port']['id']) self._delete('ports', port4['port']['id']) def test_requested_invalid_fixed_ip_address_v6_slaac(self): with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '2607:f0d0:1002:51::5'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, 
net_id=net_id, **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, '_allocate_specific_ip') def test_requested_fixed_ip_address_v6_slaac_router_iface( self, alloc_specific_ip): with self.subnet(gateway_ip='fe80::1', cidr='fe80::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': 'fe80::1'}]} net_id = subnet['subnet']['network_id'] device_owner = constants.DEVICE_OWNER_ROUTER_INTF res = self._create_port(self.fmt, net_id=net_id, device_owner=device_owner, **kwargs) port = self.deserialize(self.fmt, res) self.assertEqual(len(port['port']['fixed_ips']), 1) self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], 'fe80::1') self.assertFalse(alloc_specific_ip.called) def test_requested_subnet_id_v6_slaac(self): with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: with self.port(subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}]) as port: port_mac = port['port']['mac_address'] subnet_cidr = subnet['subnet']['cidr'] eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], eui_addr) def test_requested_subnet_id_v4_and_v6_slaac(self): with self.network() as network: with contextlib.nested( self.subnet(network), self.subnet(network, cidr='2607:f0d0:1002:51::/64', ip_version=6, gateway_ip='fe80::1', ipv6_address_mode=constants.IPV6_SLAAC) ) as (subnet, subnet2): with self.port( subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}] ) as port: ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 2) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) port_mac = port['port']['mac_address'] subnet_cidr = subnet2['subnet']['cidr'] eui_addr = 
str(ipv6_utils.get_ipv6_addr_by_EUI64(
                        subnet_cidr, port_mac))
                    self.assertIn({'ip_address': eui_addr,
                                   'subnet_id': subnet2['subnet']['id']},
                                  ips)

    def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self):
        with self.network() as network:
            # Create an IPv4 and an IPv6 SLAAC subnet on the network
            with contextlib.nested(
                self.subnet(network),
                self.subnet(network,
                            cidr='2607:f0d0:1002:51::/64',
                            ip_version=6,
                            gateway_ip='fe80::1',
                            ipv6_address_mode=constants.IPV6_SLAAC)):
                # Create a router port without specifying fixed_ips
                port = self._make_port(
                    self.fmt, network['network']['id'],
                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
                # Router port should only have an IPv4 address
                fixed_ips = port['port']['fixed_ips']
                self.assertEqual(1, len(fixed_ips))
                self.assertEqual('10.0.0.2', fixed_ips[0]['ip_address'])

    def _make_v6_subnet(self, network, ra_addr_mode):
        # Helper: create an fe80::/64 subnet with matching RA and
        # address modes (both set to ra_addr_mode).
        return (self._make_subnet(self.fmt, network, gateway='fe80::1',
                                  cidr='fe80::/64', ip_version=6,
                                  ipv6_ra_mode=ra_addr_mode,
                                  ipv6_address_mode=ra_addr_mode))

    @staticmethod
    def _calc_ipv6_addr_by_EUI64(port, subnet):
        # Expected SLAAC address: EUI-64 interface id derived from the
        # port's MAC, combined with the subnet prefix.
        port_mac = port['port']['mac_address']
        subnet_cidr = subnet['subnet']['cidr']
        return str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac))

    def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self):
        # A port on a SLAAC subnet gets exactly the EUI-64-derived address.
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        port = self._make_port(self.fmt, network['network']['id'])
        self.assertEqual(1, len(port['port']['fixed_ips']))
        self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet),
                         port['port']['fixed_ips'][0]['ip_address'])

    def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode):
        """Test port create with an IPv6 subnet incl in fixed IPs."""
        with self.network(name='net') as network:
            subnet = self._make_v6_subnet(network, addr_mode)
            subnet_id = subnet['subnet']['id']
            fixed_ips = [{'subnet_id': subnet_id}]
            with 
self.port(subnet=subnet, fixed_ips=fixed_ips) as port: if addr_mode == constants.IPV6_SLAAC: exp_ip_addr = self._calc_ipv6_addr_by_EUI64(port, subnet) else: exp_ip_addr = 'fe80::2' port_fixed_ips = port['port']['fixed_ips'] self.assertEqual(1, len(port_fixed_ips)) self.assertEqual(exp_ip_addr, port_fixed_ips[0]['ip_address']) def test_create_port_with_ipv6_slaac_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( addr_mode=constants.IPV6_SLAAC) def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( addr_mode=constants.DHCPV6_STATEFUL) def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dicts = [ {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'ra_addr_mode': None}, {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24', 'ip_version': 4, 'ra_addr_mode': None}, {'gateway': 'fe80::1', 'cidr': 'fe80::/64', 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe81::1', 'cidr': 'fe81::/64', 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe82::1', 'cidr': 'fe82::/64', 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}, {'gateway': 'fe83::1', 'cidr': 'fe83::/64', 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}] subnets = {} for sub_dict in sub_dicts: subnet = self._make_subnet( self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) subnets[subnet['subnet']['id']] = sub_dict res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) # Since the create port request was made without a list of fixed IPs, # the port should be associated with 
addresses for one of the # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6 # SLAAC subnets. self.assertEqual(4, len(port['port']['fixed_ips'])) addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0, constants.IPV6_SLAAC: 0} for fixed_ip in port['port']['fixed_ips']: subnet_id = fixed_ip['subnet_id'] if subnet_id in subnets: addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1 self.assertEqual(1, addr_mode_count[None]) self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL]) self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC]) def test_delete_port_with_ipv6_slaac_address(self): """Test that a port with an IPv6 SLAAC address can be deleted.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) # Create a port that has an associated IPv6 SLAAC address self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(1, len(port['port']['fixed_ips'])) # Confirm that the port can be deleted self._delete('ports', port['port']['id']) self._show('ports', port['port']['id'], expected_code=webob.exc.HTTPNotFound.code) def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self): """Test port update with an IPv6 SLAAC subnet in fixed IPs.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet self._make_subnet(self.fmt, network, gateway='10.0.0.1', cidr='10.0.0.0/24', ip_version=4) subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(2, len(port['port']['fixed_ips'])) # Update port including only the IPv6 SLAAC subnet data = {'port': {'fixed_ips': [{'subnet_id': subnet_v6['subnet']['id']}]}} req = 
self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) # Port should only have an address corresponding to IPv6 SLAAC subnet ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet_v6), ips[0]['ip_address']) def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self): """Test port update excluding IPv6 SLAAC subnet from fixed ips.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet subnet_v4 = self._make_subnet(self.fmt, network, gateway='10.0.0.1', cidr='10.0.0.0/24', ip_version=4) subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(2, len(port['port']['fixed_ips'])) # Update port including only the IPv4 subnet data = {'port': {'fixed_ips': [{'subnet_id': subnet_v4['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) # Port should still have an addr corresponding to IPv6 SLAAC subnet ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) eui_addr = self._calc_ipv6_addr_by_EUI64(port, subnet_v6) expected_v6_ip = {'subnet_id': subnet_v6['subnet']['id'], 'ip_address': eui_addr} self.assertIn(expected_v6_ip, ips) def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) v6_subnet_1 = self._make_subnet(self.fmt, network, gateway='2001:100::1', cidr='2001:100::0/64', ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC) v6_subnet_2 = self._make_subnet(self.fmt, network, gateway='2001:200::1', cidr='2001:200::0/64', ip_version=6, 
ipv6_ra_mode=constants.IPV6_SLAAC) port = self._make_port(self.fmt, network['network']['id']) self.assertEqual(len(port['port']['fixed_ips']), 2) port_mac = port['port']['mac_address'] cidr_1 = v6_subnet_1['subnet']['cidr'] cidr_2 = v6_subnet_2['subnet']['cidr'] eui_addr_1 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_1, port_mac)) eui_addr_2 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_2, port_mac)) self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], eui_addr_1) self.assertEqual(port['port']['fixed_ips'][1]['ip_address'], eui_addr_2) def test_range_allocation(self): with self.subnet(gateway_ip='10.0.0.3', cidr='10.0.0.0/29') as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port = self.deserialize(self.fmt, res) ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 5) alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5', '10.0.0.6'] for ip in ips: self.assertIn(ip['ip_address'], alloc) self.assertEqual(ip['subnet_id'], subnet['subnet']['id']) alloc.remove(ip['ip_address']) self.assertEqual(len(alloc), 0) self._delete('ports', port['port']['id']) with self.subnet(gateway_ip='11.0.0.6', cidr='11.0.0.0/29') as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port = self.deserialize(self.fmt, res) ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 5) alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4', '11.0.0.5'] for ip in ips: self.assertIn(ip['ip_address'], alloc) 
self.assertEqual(ip['subnet_id'], subnet['subnet']['id']) alloc.remove(ip['ip_address']) self.assertEqual(len(alloc), 0) self._delete('ports', port['port']['id']) def test_requested_invalid_fixed_ips(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.2') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) # Test invalid subnet_id kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': '00000000-ffff-ffff-ffff-000000000000'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) # Test invalid IP address on specified subnet_id kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '1.1.1.1'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) # Test invalid addresses - IP's not on subnet or network # address or broadcast address bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255'] net_id = port['port']['network_id'] for ip in bad_ips: kwargs = {"fixed_ips": [{'ip_address': ip}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) # Enable allocation of gateway address kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.1'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) ips = port2['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.1') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) self._delete('ports', 
port2['port']['id']) def test_invalid_ip(self): with self.subnet() as subnet: # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '1011.0.0.5'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_requested_split(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ports_to_delete = [] ips = port['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.2') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.5'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) ports_to_delete.append(port2) ips = port2['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], '10.0.0.5') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) # Allocate specific IP's allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6'] for a in allocated: res = self._create_port(self.fmt, net_id=net_id) port2 = self.deserialize(self.fmt, res) ports_to_delete.append(port2) ips = port2['port']['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], a) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) for p in ports_to_delete: self._delete('ports', p['port']['id']) def test_duplicate_ips(self): with self.subnet() as subnet: # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.5'}, {'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.5'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_fixed_ip_invalid_subnet_id(self): 
with self.subnet() as subnet:
            # Allocate specific IP
            # A fixed_ips entry whose subnet_id is not even a valid UUID
            # must be rejected as a client error.
            kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid',
                                     'ip_address': '10.0.0.5'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int,
                             webob.exc.HTTPClientError.code)

    def test_fixed_ip_invalid_ip(self):
        # A syntactically invalid IP address in fixed_ips is a client error.
        with self.subnet() as subnet:
            # Allocate specific IP
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.55555'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(res.status_int,
                             webob.exc.HTTPClientError.code)

    def test_requested_ips_only(self):
        # Ports may be created with just an ip_address (no subnet_id);
        # the subnet is inferred, and arbitrary free addresses can be
        # requested in any order.
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22',
                            '10.0.0.21', '10.0.0.3', '10.0.0.17',
                            '10.0.0.19']
                ports_to_delete = []
                for i in ips_only:
                    kwargs = {"fixed_ips": [{'ip_address': i}]}
                    net_id = port['port']['network_id']
                    res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                    port = self.deserialize(self.fmt, res)
                    ports_to_delete.append(port)
                    ips = port['port']['fixed_ips']
                    self.assertEqual(len(ips), 1)
                    self.assertEqual(ips[0]['ip_address'], i)
                    self.assertEqual(ips[0]['subnet_id'],
                                     subnet['subnet']['id'])
                for p in ports_to_delete:
                    self._delete('ports', p['port']['id'])

    def test_invalid_admin_state(self):
        # admin_state_up must be convertible to a boolean; 7 is rejected.
        with self.network() as network:
            data = {'port': {'network_id': network['network']['id'],
                             'tenant_id': network['network']['tenant_id'],
                             'admin_state_up': 7,
                             'fixed_ips': []}}
            port_req = self.new_create_request('ports', data)
            res = port_req.get_response(self.api)
            self.assertEqual(res.status_int,
                             webob.exc.HTTPClientError.code)

    def test_invalid_mac_address(self):
        # 'mac' is not a valid MAC address string.
        with self.network() as network:
            data = {'port': {'network_id': network['network']['id'], 
'tenant_id': network['network']['tenant_id'], 'admin_state_up': 1, 'mac_address': 'mac', 'fixed_ips': []}} port_req = self.new_create_request('ports', data) res = port_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_max_fixed_ips_exceeded(self): with self.subnet(gateway_ip='10.0.0.3', cidr='10.0.0.0/24') as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_max_fixed_ips_exceeded(self): with self.subnet(gateway_ip='10.0.0.3', cidr='10.0.0.0/24') as subnet: with self.port(subnet) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}, {'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.4'}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_delete_ports_by_device_id(self): plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() with self.subnet() as subnet: with contextlib.nested( self.port(subnet=subnet, device_id='owner1'), self.port(subnet=subnet, device_id='owner1'), self.port(subnet=subnet, device_id='owner2'), ) as (p1, p2, p3): network_id = subnet['subnet']['network_id'] plugin.delete_ports_by_device_id(ctx, 'owner1', network_id) self._show('ports', p1['port']['id'], expected_code=webob.exc.HTTPNotFound.code) self._show('ports', p2['port']['id'], 
expected_code=webob.exc.HTTPNotFound.code) self._show('ports', p3['port']['id'], expected_code=webob.exc.HTTPOk.code) def _test_delete_ports_by_device_id_second_call_failure(self, plugin): ctx = context.get_admin_context() with self.subnet() as subnet: with contextlib.nested( self.port(subnet=subnet, device_id='owner1'), self.port(subnet=subnet, device_id='owner1'), self.port(subnet=subnet, device_id='owner2'), ) as (p1, p2, p3): orig = plugin.delete_port with mock.patch.object(plugin, 'delete_port') as del_port: def side_effect(*args, **kwargs): return self._fail_second_call(del_port, orig, *args, **kwargs) del_port.side_effect = side_effect network_id = subnet['subnet']['network_id'] self.assertRaises(n_exc.NeutronException, plugin.delete_ports_by_device_id, ctx, 'owner1', network_id) statuses = { self._show_response('ports', p['port']['id']).status_int for p in [p1, p2]} expected = {webob.exc.HTTPNotFound.code, webob.exc.HTTPOk.code} self.assertEqual(expected, statuses) self._show('ports', p3['port']['id'], expected_code=webob.exc.HTTPOk.code) def test_delete_ports_by_device_id_second_call_failure(self): plugin = manager.NeutronManager.get_plugin() self._test_delete_ports_by_device_id_second_call_failure(plugin) def _test_delete_ports_ignores_port_not_found(self, plugin): ctx = context.get_admin_context() with self.subnet() as subnet: with contextlib.nested( self.port(subnet=subnet, device_id='owner1'), mock.patch.object(plugin, 'delete_port') ) as (p, del_port): del_port.side_effect = n_exc.PortNotFound( port_id=p['port']['id'] ) network_id = subnet['subnet']['network_id'] try: plugin.delete_ports_by_device_id(ctx, 'owner1', network_id) except n_exc.PortNotFound: self.fail("delete_ports_by_device_id unexpectedly raised " "a PortNotFound exception. 
It should ignore " "this exception because it is often called at " "the same time other concurrent operations are " "deleting some of the same ports.") def test_delete_ports_ignores_port_not_found(self): plugin = manager.NeutronManager.get_plugin() self._test_delete_ports_ignores_port_not_found(plugin) class TestNetworksV2(NeutronDbPluginV2TestCase): # NOTE(cerberus): successful network update and delete are # effectively tested above def test_create_network(self): name = 'net1' keys = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', self.net_create_status), ('shared', False)] with self.network(name=name) as net: for k, v in keys: self.assertEqual(net['network'][k], v) def test_create_public_network(self): name = 'public_net' keys = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', self.net_create_status), ('shared', True)] with self.network(name=name, shared=True) as net: for k, v in keys: self.assertEqual(net['network'][k], v) def test_create_public_network_no_admin_tenant(self): name = 'public_net' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.network(name=name, shared=True, tenant_id="another_tenant", set_context=True): pass self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPForbidden.code) def test_update_network(self): with self.network() as network: data = {'network': {'name': 'a_brand_new_name'}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['name'], data['network']['name']) def test_update_shared_network_noadmin_returns_403(self): with self.network(shared=True) as network: data = {'network': {'name': 'a_brand_new_name'}} req = self.new_update_request('networks', data, network['network']['id']) req.environ['neutron.context'] = context.Context('', 'somebody') res = req.get_response(self.api) # The API layer always returns 404 on updates in place 
of 403 self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) def test_update_network_set_shared(self): with self.network(shared=False) as network: data = {'network': {'shared': True}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertTrue(res['network']['shared']) def test_update_network_set_shared_owner_returns_403(self): with self.network(shared=False) as network: net_owner = network['network']['tenant_id'] data = {'network': {'shared': True}} req = self.new_update_request('networks', data, network['network']['id']) req.environ['neutron.context'] = context.Context('u', net_owner) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPForbidden.code) def test_update_network_with_subnet_set_shared(self): with self.network(shared=False) as network: with self.subnet(network=network) as subnet: data = {'network': {'shared': True}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertTrue(res['network']['shared']) # must query db to see whether subnet's shared attribute # has been updated or not ctx = context.Context('', '', is_admin=True) subnet_db = manager.NeutronManager.get_plugin()._get_subnet( ctx, subnet['subnet']['id']) self.assertEqual(subnet_db['shared'], True) def test_update_network_set_not_shared_single_tenant(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertFalse(res['network']['shared']) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def 
test_update_network_set_not_shared_other_tenant_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(req.get_response(self.api).status_int, webob.exc.HTTPConflict.code) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_update_network_set_not_shared_multi_tenants_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) res2 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(req.get_response(self.api).status_int, webob.exc.HTTPConflict.code) port1 = self.deserialize(self.fmt, res1) port2 = self.deserialize(self.fmt, res2) self._delete('ports', port1['port']['id']) self._delete('ports', port2['port']['id']) def test_update_network_set_not_shared_multi_tenants2_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) self._create_subnet(self.fmt, network['network']['id'], '10.0.0.0/24', webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(req.get_response(self.api).status_int, webob.exc.HTTPConflict.code) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def 
test_create_networks_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") res = self._create_network_bulk(self.fmt, 2, 'test', True) self._validate_behavior_on_bulk_success(res, 'networks') def test_create_networks_bulk_native_quotas(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 4 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') res = self._create_network_bulk(self.fmt, quota + 1, 'test', True) self._validate_behavior_on_bulk_failure( res, 'networks', errcode=webob.exc.HTTPConflict.code) def test_create_networks_bulk_tenants_and_quotas(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 2 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') networks = [{'network': {'name': 'n1', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n1', 'tenant_id': 't1'}}, {'network': {'name': 'n2', 'tenant_id': 't1'}}] res = self._create_bulk_from_list(self.fmt, 'network', networks) self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) def test_create_networks_bulk_tenants_and_quotas_fail(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 2 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') networks = [{'network': {'name': 'n1', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n1', 'tenant_id': 't1'}}, {'network': {'name': 'n3', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': 't1'}}] res = self._create_bulk_from_list(self.fmt, 'network', networks) self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) def test_create_networks_bulk_emulated(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if 
attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('__builtin__.hasattr', new=fakehasattr): res = self._create_network_bulk(self.fmt, 2, 'test', True) self._validate_behavior_on_bulk_success(res, 'networks') def test_create_networks_bulk_wrong_input(self): res = self._create_network_bulk(self.fmt, 2, 'test', True, override={1: {'admin_state_up': 'doh'}}) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) req = self.new_list_request('networks') res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPOk.code) nets = self.deserialize(self.fmt, res) self.assertEqual(len(nets['networks']), 0) def test_create_networks_bulk_emulated_plugin_failure(self): real_has_attr = hasattr def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) orig = manager.NeutronManager.get_plugin().create_network #ensures the API choose the emulation code path with mock.patch('__builtin__.hasattr', new=fakehasattr): method_to_patch = _get_create_db_method('network') with mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'networks', webob.exc.HTTPServerError.code ) def test_create_networks_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") orig = manager.NeutronManager.get_plugin().create_network method_to_patch = _get_create_db_method('network') with mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, 
**kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'networks', webob.exc.HTTPServerError.code ) def test_list_networks(self): with contextlib.nested(self.network(), self.network(), self.network()) as networks: self._test_list_resources('network', networks) def test_list_networks_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with contextlib.nested(self.network(admin_status_up=True, name='net1'), self.network(admin_status_up=False, name='net2'), self.network(admin_status_up=False, name='net3') ) as (net1, net2, net3): self._test_list_with_sort('network', (net3, net2, net1), [('admin_state_up', 'asc'), ('name', 'desc')]) def test_list_networks_with_sort_extended_attr_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with contextlib.nested(self.network(admin_status_up=True, name='net1'), self.network(admin_status_up=False, name='net2'), self.network(admin_status_up=False, name='net3') ): req = self.new_list_request( 'networks', params='sort_key=provider:segmentation_id&sort_dir=asc') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_networks_with_sort_remote_key_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with contextlib.nested(self.network(admin_status_up=True, name='net1'), self.network(admin_status_up=False, name='net2'), self.network(admin_status_up=False, name='net3') ): req = self.new_list_request( 'networks', params='sort_key=subnets&sort_dir=asc') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_networks_with_sort_emulated(self): helper_patcher = mock.patch( 
'neutron.api.v2.base.Controller._get_sorting_helper', new=_fake_get_sorting_helper) helper_patcher.start() with contextlib.nested(self.network(admin_status_up=True, name='net1'), self.network(admin_status_up=False, name='net2'), self.network(admin_status_up=False, name='net3') ) as (net1, net2, net3): self._test_list_with_sort('network', (net3, net2, net1), [('admin_state_up', 'asc'), ('name', 'desc')]) def test_list_networks_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with contextlib.nested(self.network(name='net1'), self.network(name='net2'), self.network(name='net3') ) as (net1, net2, net3): self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with contextlib.nested(self.network(name='net1'), self.network(name='net2'), self.network(name='net3') ) as (net1, net2, net3): self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_without_pk_in_fields_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with contextlib.nested(self.network(name='net1', shared=True), self.network(name='net2', shared=False), self.network(name='net3', shared=True) ) as (net1, net2, net3): self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, query_params="fields=name", verify_key='name') def test_list_networks_without_pk_in_fields_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with contextlib.nested(self.network(name='net1'), self.network(name='net2'), self.network(name='net3') ) as (net1, net2, net3): 
self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, query_params="fields=shared", verify_key='shared') def test_list_networks_with_pagination_reverse_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with contextlib.nested(self.network(name='net1'), self.network(name='net2'), self.network(name='net3') ) as (net1, net2, net3): self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_pagination_reverse_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with contextlib.nested(self.network(name='net1'), self.network(name='net2'), self.network(name='net3') ) as (net1, net2, net3): self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_parameters(self): with contextlib.nested(self.network(name='net1', admin_state_up=False), self.network(name='net2')) as (net1, net2): query_params = 'admin_state_up=False' self._test_list_resources('network', [net1], query_params=query_params) query_params = 'admin_state_up=True' self._test_list_resources('network', [net2], query_params=query_params) def test_list_networks_with_fields(self): with self.network(name='net1') as net1: req = self.new_list_request('networks', params='fields=name') res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, len(res['networks'])) self.assertEqual(res['networks'][0]['name'], net1['network']['name']) self.assertIsNone(res['networks'][0].get('id')) def test_list_networks_with_parameters_invalid_values(self): with contextlib.nested(self.network(name='net1', admin_state_up=False), self.network(name='net2')) as (net1, net2): req = self.new_list_request('networks', params='admin_state_up=fake') res = req.get_response(self.api) 
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_shared_networks_with_non_admin_user(self): with contextlib.nested(self.network(shared=False, name='net1', tenant_id='tenant1'), self.network(shared=True, name='net2', tenant_id='another_tenant'), self.network(shared=False, name='net3', tenant_id='another_tenant') ) as (net1, net2, net3): ctx = context.Context(user_id='non_admin', tenant_id='tenant1', is_admin=False) self._test_list_resources('network', (net1, net2), ctx) def test_show_network(self): with self.network(name='net1') as net: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['name'], net['network']['name']) def test_show_network_with_subnet(self): with self.network(name='net1') as net: with self.subnet(net) as subnet: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['subnets'][0], subnet['subnet']['id']) def test_invalid_admin_status(self): value = [[7, False, webob.exc.HTTPClientError.code], [True, True, webob.exc.HTTPCreated.code], ["True", True, webob.exc.HTTPCreated.code], ["true", True, webob.exc.HTTPCreated.code], [1, True, webob.exc.HTTPCreated.code], ["False", False, webob.exc.HTTPCreated.code], [False, False, webob.exc.HTTPCreated.code], ["false", False, webob.exc.HTTPCreated.code], ["7", False, webob.exc.HTTPClientError.code]] for v in value: data = {'network': {'name': 'net', 'admin_state_up': v[0], 'tenant_id': self._tenant_id}} network_req = self.new_create_request('networks', data) req = network_req.get_response(self.api) self.assertEqual(req.status_int, v[2]) if v[2] == webob.exc.HTTPCreated.code: res = self.deserialize(self.fmt, req) self.assertEqual(res['network']['admin_state_up'], v[1]) def test_get_user_allocation_for_dhcp_port_returns_none(self): plugin = manager.NeutronManager.get_plugin() if not 
hasattr(plugin, '_subnet_get_user_allocation'): return with contextlib.nested( self.network(), self.network() ) as (net, net1): with contextlib.nested( self.subnet(network=net, cidr='10.0.0.0/24'), self.subnet(network=net1, cidr='10.0.1.0/24') ) as (subnet, subnet1): with contextlib.nested( self.port(subnet=subnet, device_owner='network:dhcp'), self.port(subnet=subnet1) ) as (p, p2): # check that user allocations on another network don't # affect _subnet_get_user_allocation method res = plugin._subnet_get_user_allocation( context.get_admin_context(), subnet['subnet']['id']) self.assertIsNone(res) class TestSubnetsV2(NeutronDbPluginV2TestCase): def _test_create_subnet(self, network=None, expected=None, **kwargs): keys = kwargs.copy() keys.setdefault('cidr', '10.0.0.0/24') keys.setdefault('ip_version', 4) keys.setdefault('enable_dhcp', True) with self.subnet(network=network, **keys) as subnet: # verify the response has each key with the correct value self._validate_resource(subnet, keys, 'subnet') # verify the configured validations are correct if expected: self._compare_resource(subnet, expected, 'subnet') self._delete('subnets', subnet['subnet']['id']) return subnet def test_create_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' subnet = self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr) self.assertEqual(4, subnet['subnet']['ip_version']) self.assertIn('name', subnet['subnet']) def test_create_subnet_with_network_different_tenant(self): with self.network(shared=False, tenant_id='tenant1') as network: ctx = context.Context(user_id='non_admin', tenant_id='tenant2', is_admin=False) data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': '4', 'gateway_ip': '10.0.2.1'}} req = self.new_create_request('subnets', data, self.fmt, context=ctx) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_two_subnets(self): gateway_ips = ['10.0.0.1', '10.0.1.1'] cidrs = 
['10.0.0.0/24', '10.0.1.0/24'] with self.network() as network: with self.subnet(network=network, gateway_ip=gateway_ips[0], cidr=cidrs[0]): with self.subnet(network=network, gateway_ip=gateway_ips[1], cidr=cidrs[1]): net_req = self.new_show_request('networks', network['network']['id']) raw_res = net_req.get_response(self.api) net_res = self.deserialize(self.fmt, raw_res) for subnet_id in net_res['network']['subnets']: sub_req = self.new_show_request('subnets', subnet_id) raw_res = sub_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res) self.assertIn(sub_res['subnet']['cidr'], cidrs) self.assertIn(sub_res['subnet']['gateway_ip'], gateway_ips) def test_create_two_subnets_same_cidr_returns_400(self): gateway_ip_1 = '10.0.0.1' cidr_1 = '10.0.0.0/24' gateway_ip_2 = '10.0.0.10' cidr_2 = '10.0.0.0/24' with self.network() as network: with self.subnet(network=network, gateway_ip=gateway_ip_1, cidr=cidr_1): with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.subnet(network=network, gateway_ip=gateway_ip_2, cidr=cidr_2): pass self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_bad_V4_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0', 'ip_version': '4', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_no_ip_version(self): with self.network() as network: cfg.CONF.set_override('default_ipv4_subnet_pool', None) cfg.CONF.set_override('default_ipv6_subnet_pool', None) data = {'subnet': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, 
webob.exc.HTTPClientError.code) def test_create_subnet_only_ip_version_v6_no_pool(self): with self.network() as network: tenant_id = network['network']['tenant_id'] cfg.CONF.set_override('default_ipv6_subnet_pool', None) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_only_ip_version_v4(self): with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=False, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25') as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] cfg.CONF.set_override('default_ipv4_subnet_pool', subnetpool_id) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '4', 'prefixlen': '27', 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = self.deserialize(self.fmt, res)['subnet'] ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertTrue(ip_net in netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(27, ip_net.prefixlen) self.assertEqual(subnetpool_id, subnet['subnetpool_id']) def test_create_subnet_only_ip_version_v6(self): with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '2000::/56' with self.subnetpool(prefixes=[subnetpool_prefix], admin=False, name="My ipv6 subnet pool", tenant_id=tenant_id, min_prefixlen='64') as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] cfg.CONF.set_override('default_ipv6_subnet_pool', subnetpool_id) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = 
self.deserialize(self.fmt, res)['subnet'] self.assertEqual(subnetpool_id, subnet['subnetpool_id']) ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertTrue(ip_net in netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(64, ip_net.prefixlen) def test_create_subnet_bad_V4_cidr_prefix_len(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': constants.IPv4_ANY, 'ip_version': '4', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '0.0.0.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_bad_V6_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::', 'ip_version': '6', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': 'fe80::1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_V6_slaac_big_prefix(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '2014::/65', 'ip_version': '6', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': 'fe80::1', 'ipv6_address_mode': 'slaac'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self): cidr_1 = '10.0.0.0/23' cidr_2 = '10.0.0.0/24' cfg.CONF.set_override('allow_overlapping_ips', True) with contextlib.nested(self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2)): pass def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): cidr_1 = '10.0.0.0/23' cidr_2 = '10.0.0.0/24' cfg.CONF.set_override('allow_overlapping_ips', False) with testlib_api.ExpectedException( webob.exc.HTTPClientError) as 
ctx_manager: with contextlib.nested(self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2)): pass self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnets_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk subnet create") with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._validate_behavior_on_bulk_success(res, 'subnets') def test_create_subnets_bulk_emulated(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('__builtin__.hasattr', new=fakehasattr): with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._validate_behavior_on_bulk_success(res, 'subnets') def test_create_subnets_bulk_emulated_plugin_failure(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('__builtin__.hasattr', new=fakehasattr): orig = manager.NeutronManager.get_plugin().create_subnet method_to_patch = _get_create_db_method('subnet') with mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._delete('networks', net['network']['id']) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'subnets', webob.exc.HTTPServerError.code ) def test_create_subnets_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk subnet 
create") plugin = manager.NeutronManager.get_plugin() orig = plugin.create_subnet method_to_patch = _get_create_db_method('subnet') with mock.patch.object(plugin, method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'subnets', webob.exc.HTTPServerError.code ) def test_delete_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' # Create new network res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_delete_subnet_port_exists_owned_by_network(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' # Create new network res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) self._create_port(self.fmt, network['network']['id'], device_owner=constants.DEVICE_OWNER_DHCP) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_delete_subnet_dhcp_port_associated_with_other_subnets(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet1 = self._make_subnet(self.fmt, network, '10.0.0.1', '10.0.0.0/24', ip_version=4) subnet2 = self._make_subnet(self.fmt, network, '10.0.1.1', '10.0.1.0/24', ip_version=4) res = 
self._create_port(self.fmt, network['network']['id'], device_owner=constants.DEVICE_OWNER_DHCP, fixed_ips=[ {'subnet_id': subnet1['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']} ]) port = self.deserialize(self.fmt, res) expected_subnets = [subnet1['subnet']['id'], subnet2['subnet']['id']] self.assertEqual(expected_subnets, [s['subnet_id'] for s in port['port']['fixed_ips']]) req = self.new_delete_request('subnets', subnet1['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 204) port = self._show('ports', port['port']['id']) expected_subnets = [subnet2['subnet']['id']] self.assertEqual(expected_subnets, [s['subnet_id'] for s in port['port']['fixed_ips']]) req = self.new_delete_request('subnets', subnet2['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 204) port = self._show('ports', port['port']['id']) self.assertFalse(port['port']['fixed_ips']) def test_delete_subnet_port_exists_owned_by_other(self): with self.subnet() as subnet: with self.port(subnet=subnet): id = subnet['subnet']['id'] req = self.new_delete_request('subnets', id) res = req.get_response(self.api) data = self.deserialize(self.fmt, res) self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) msg = str(n_exc.SubnetInUse(subnet_id=id)) self.assertEqual(data['NeutronError']['message'], msg) def test_delete_subnet_with_other_subnet_on_network_still_in_use(self): with self.network() as network: with contextlib.nested( self.subnet(network=network), self.subnet(network=network, cidr='10.0.1.0/24'), ) as (subnet1, subnet2): subnet1_id = subnet1['subnet']['id'] subnet2_id = subnet2['subnet']['id'] with self.port( subnet=subnet1, fixed_ips=[{'subnet_id': subnet1_id}]): req = self.new_delete_request('subnets', subnet2_id) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def _create_slaac_subnet_and_port(self, port_owner=None): # Create an IPv6 SLAAC subnet and a port using that subnet 
res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway='fe80::1', cidr='fe80::/64', ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC) kwargs = {} if port_owner: kwargs['device_owner'] = port_owner if port_owner in constants.ROUTER_INTERFACE_OWNERS: kwargs['fixed_ips'] = [{'ip_address': 'fe80::1'}] res = self._create_port(self.fmt, net_id=network['network']['id'], **kwargs) port = self.deserialize(self.fmt, res) self.assertEqual(1, len(port['port']['fixed_ips'])) # The port should have an address from the subnet req = self.new_show_request('ports', port['port']['id'], self.fmt) res = req.get_response(self.api) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, len(sport['port']['fixed_ips'])) return subnet, port def test_delete_subnet_ipv6_slaac_port_exists(self): """Test IPv6 SLAAC subnet delete when a port is still using subnet.""" subnet, port = self._create_slaac_subnet_and_port() # Delete the subnet req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) # The port should no longer have an address from the deleted subnet req = self.new_show_request('ports', port['port']['id'], self.fmt) res = req.get_response(self.api) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(0, len(sport['port']['fixed_ips'])) def test_delete_subnet_ipv6_slaac_router_port_exists(self): """Test IPv6 SLAAC subnet delete with a router port using the subnet""" subnet, port = self._create_slaac_subnet_and_port( constants.DEVICE_OWNER_ROUTER_INTF) # Delete the subnet and assert that we get a HTTP 409 error req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) # The subnet 
should still exist and the port should still have an # address from the subnet req = self.new_show_request('subnets', subnet['subnet']['id'], self.fmt) res = req.get_response(self.api) ssubnet = self.deserialize(self.fmt, req.get_response(self.api)) self.assertIsNotNone(ssubnet) req = self.new_show_request('ports', port['port']['id'], self.fmt) res = req.get_response(self.api) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, len(sport['port']['fixed_ips'])) port_subnet_ids = [fip['subnet_id'] for fip in sport['port']['fixed_ips']] self.assertIn(subnet['subnet']['id'], port_subnet_ids) def test_delete_network(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' # Create new network res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('networks', network['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) req = self.new_show_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_subnet_bad_tenant(self): with self.network() as network: self._create_subnet(self.fmt, network['network']['id'], '10.0.2.0/24', webob.exc.HTTPNotFound.code, ip_version=4, tenant_id='bad_tenant_id', gateway_ip='10.0.2.1', device_owner='fake_owner', set_context=True) def test_create_subnet_as_admin(self): with self.network() as network: self._create_subnet(self.fmt, network['network']['id'], '10.0.2.0/24', webob.exc.HTTPCreated.code, ip_version=4, tenant_id='bad_tenant_id', gateway_ip='10.0.2.1', device_owner='fake_owner', set_context=False) def test_create_subnet_nonzero_cidr(self): with contextlib.nested( self.subnet(cidr='10.129.122.5/8'), self.subnet(cidr='11.129.122.5/15'), self.subnet(cidr='12.129.122.5/16'), 
self.subnet(cidr='13.129.122.5/18'), self.subnet(cidr='14.129.122.5/22'), self.subnet(cidr='15.129.122.5/24'), self.subnet(cidr='16.129.122.5/28'), self.subnet(cidr='17.129.122.5/32', enable_dhcp=False) ) as subs: # the API should accept and correct these for users self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8') self.assertEqual(subs[1]['subnet']['cidr'], '11.128.0.0/15') self.assertEqual(subs[2]['subnet']['cidr'], '12.129.0.0/16') self.assertEqual(subs[3]['subnet']['cidr'], '13.129.64.0/18') self.assertEqual(subs[4]['subnet']['cidr'], '14.129.120.0/22') self.assertEqual(subs[5]['subnet']['cidr'], '15.129.122.0/24') self.assertEqual(subs[6]['subnet']['cidr'], '16.129.122.0/28') self.assertEqual(subs[7]['subnet']['cidr'], '17.129.122.5/32') def _test_create_subnet_with_invalid_netmask_returns_400(self, *args): with self.network() as network: for cidr in args: ip_version = netaddr.IPNetwork(cidr).version self._create_subnet(self.fmt, network['network']['id'], cidr, webob.exc.HTTPClientError.code, ip_version=ip_version) def test_create_subnet_with_invalid_netmask_returns_400_ipv4(self): self._test_create_subnet_with_invalid_netmask_returns_400( '10.0.0.0/31', '10.0.0.0/32') def test_create_subnet_with_invalid_netmask_returns_400_ipv6(self): self._test_create_subnet_with_invalid_netmask_returns_400( 'cafe:cafe::/127', 'cafe:cafe::/128') def test_create_subnet_bad_ip_version(self): with self.network() as network: # Check bad IP version data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 'abc', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_bad_ip_version_null(self): with self.network() as network: # Check bad IP version data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 
'ip_version': None, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_bad_uuid(self): with self.network() as network: # Check invalid UUID data = {'subnet': {'network_id': None, 'cidr': '10.0.2.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_bad_boolean(self): with self.network() as network: # Check invalid boolean data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': '4', 'enable_dhcp': None, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_bad_pools(self): with self.network() as network: # Check allocation pools allocation_pools = [[{'end': '10.0.0.254'}], [{'start': '10.0.0.254'}], [{'start': '1000.0.0.254'}], [{'start': '10.0.0.2', 'end': '10.0.0.254'}, {'end': '10.0.0.254'}], None, [{'start': '10.0.0.2', 'end': '10.0.0.3'}, {'start': '10.0.0.2', 'end': '10.0.0.3'}]] tenant_id = network['network']['tenant_id'] for pool in allocation_pools: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': '4', 'tenant_id': tenant_id, 'gateway_ip': '10.0.2.1', 'allocation_pools': pool}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_bad_nameserver(self): with self.network() as network: # Check nameservers nameserver_pools = 
[['1100.0.0.2'], ['1.1.1.2', '1.1000.1.3'], ['1.1.1.2', '1.1.1.2']] tenant_id = network['network']['tenant_id'] for nameservers in nameserver_pools: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': '4', 'tenant_id': tenant_id, 'gateway_ip': '10.0.2.1', 'dns_nameservers': nameservers}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_bad_hostroutes(self): with self.network() as network: # Check hostroutes hostroute_pools = [[{'destination': '100.0.0.0/24'}], [{'nexthop': '10.0.2.20'}], [{'nexthop': '10.0.2.20', 'destination': '100.0.0.0/8'}, {'nexthop': '10.0.2.20', 'destination': '100.0.0.0/8'}]] tenant_id = network['network']['tenant_id'] for hostroutes in hostroute_pools: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': '4', 'tenant_id': tenant_id, 'gateway_ip': '10.0.2.1', 'host_routes': hostroutes}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_defaults(self): gateway = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] enable_dhcp = True subnet = self._test_create_subnet() # verify cidr & gw have been correctly generated self.assertEqual(subnet['subnet']['cidr'], cidr) self.assertEqual(subnet['subnet']['gateway_ip'], gateway) self.assertEqual(subnet['subnet']['enable_dhcp'], enable_dhcp) self.assertEqual(subnet['subnet']['allocation_pools'], allocation_pools) def test_create_subnet_gw_values(self): cidr = '10.0.0.0/24' # Gateway is last IP in range gateway = '10.0.0.254' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.253'}] expected = {'gateway_ip': gateway, 'cidr': cidr, 'allocation_pools': allocation_pools} 
self._test_create_subnet(expected=expected, gateway_ip=gateway) # Gateway is first in subnet gateway = '10.0.0.1' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] expected = {'gateway_ip': gateway, 'cidr': cidr, 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway) def test_create_subnet_ipv6_gw_values(self): cidr = '2001::/64' # Gateway is last IP in IPv6 DHCPv6 stateful subnet gateway = '2001::ffff:ffff:ffff:fffe' allocation_pools = [{'start': '2001::1', 'end': '2001::ffff:ffff:ffff:fffd'}] expected = {'gateway_ip': gateway, 'cidr': cidr, 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway, cidr=cidr, ip_version=6, ipv6_ra_mode=constants.DHCPV6_STATEFUL, ipv6_address_mode=constants.DHCPV6_STATEFUL) # Gateway is first IP in IPv6 DHCPv6 stateful subnet gateway = '2001::1' allocation_pools = [{'start': '2001::2', 'end': '2001::ffff:ffff:ffff:fffe'}] expected = {'gateway_ip': gateway, 'cidr': cidr, 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway, cidr=cidr, ip_version=6, ipv6_ra_mode=constants.DHCPV6_STATEFUL, ipv6_address_mode=constants.DHCPV6_STATEFUL) # If gateway_ip is not specified, allocate first IP from the subnet expected = {'gateway_ip': gateway, 'cidr': cidr} self._test_create_subnet(expected=expected, cidr=cidr, ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC) def test_create_subnet_gw_outside_cidr_returns_400(self): with self.network() as network: self._create_subnet(self.fmt, network['network']['id'], '10.0.0.0/24', webob.exc.HTTPClientError.code, gateway_ip='100.0.0.1') def test_create_subnet_gw_of_network_returns_400(self): with self.network() as network: self._create_subnet(self.fmt, network['network']['id'], '10.0.0.0/24', webob.exc.HTTPClientError.code, gateway_ip='10.0.0.0') def test_create_subnet_gw_bcast_returns_400(self): with self.network() as 
network: self._create_subnet(self.fmt, network['network']['id'], '10.0.0.0/24', webob.exc.HTTPClientError.code, gateway_ip='10.0.0.255') def test_create_subnet_with_allocation_pool(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.100'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_with_none_gateway(self): cidr = '10.0.0.0/24' self._test_create_subnet(gateway_ip=None, cidr=cidr) def test_create_subnet_with_none_gateway_fully_allocated(self): cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.254'}] self._test_create_subnet(gateway_ip=None, cidr=cidr, allocation_pools=allocation_pools) def test_subnet_with_allocation_range(self): with self.network() as network: net_id = network['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'gateway_ip': '10.0.0.1', 'tenant_id': network['network']['tenant_id'], 'allocation_pools': [{'start': '10.0.0.100', 'end': '10.0.0.120'}]}} subnet_req = self.new_create_request('subnets', data) subnet = self.deserialize(self.fmt, subnet_req.get_response(self.api)) # Check fixed IP not in allocation range kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.10'}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) port = self.deserialize(self.fmt, res) # delete the port self._delete('ports', port['port']['id']) # Check when fixed IP is gateway kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.1'}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) port = self.deserialize(self.fmt, res) # delete the port self._delete('ports', port['port']['id']) def test_create_subnet_with_none_gateway_allocation_pool(self): cidr = '10.0.0.0/24' 
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.100'}] self._test_create_subnet(gateway_ip=None, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_with_v6_allocation_pool(self): gateway_ip = 'fe80::1' cidr = 'fe80::/80' allocation_pools = [{'start': 'fe80::2', 'end': 'fe80::ffff:fffa:ffff'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, ip_version=6, allocation_pools=allocation_pools) def test_create_subnet_with_large_allocation_pool(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/8' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.100'}, {'start': '10.1.0.0', 'end': '10.200.0.100'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_multiple_allocation_pools(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.100'}, {'start': '10.0.0.110', 'end': '10.0.0.150'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_with_dhcp_disabled(self): enable_dhcp = False self._test_create_subnet(enable_dhcp=enable_dhcp) def test_create_subnet_default_gw_conflict_allocation_pool_returns_409( self): cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.5'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPConflict.code) def test_create_subnet_gateway_in_allocation_pool_returns_409(self): gateway_ip = '10.0.0.50' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.100'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPConflict.code) def 
test_create_subnet_overlapping_allocation_pools_returns_409(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.150'}, {'start': '10.0.0.140', 'end': '10.0.0.180'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPConflict.code) def test_create_subnet_invalid_allocation_pool_returns_400(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.256'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_out_of_range_allocation_pool_returns_400(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.1.6'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_shared_returns_400(self): cidr = '10.0.0.0/24' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr, shared=True) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_inconsistent_ipv6_cidrv4(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 6, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def 
test_create_subnet_inconsistent_ipv4_cidrv6(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::0/80', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_inconsistent_ipv4_gatewayv6(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 4, 'gateway_ip': 'fe80::1', 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_inconsistent_ipv6_gatewayv4(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::0/80', 'ip_version': 6, 'gateway_ip': '192.168.0.1', 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_inconsistent_ipv6_dns_v4(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::0/80', 'ip_version': 6, 'dns_nameservers': ['192.168.0.1'], 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self): host_routes = [{'destination': 'fe80::0/48', 'nexthop': '10.0.2.20'}] with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 4, 'host_routes': host_routes, 'tenant_id': network['network']['tenant_id']}} 
subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self): host_routes = [{'destination': '172.16.0.0/24', 'nexthop': 'fe80::1'}] with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 4, 'host_routes': host_routes, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def _test_validate_subnet_ipv6_modes(self, cur_subnet=None, expect_success=True, **modes): plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context(load_admin_roles=False) new_subnet = {'ip_version': 6, 'cidr': 'fe80::/64', 'enable_dhcp': True, 'ipv6_address_mode': None, 'ipv6_ra_mode': None} for mode, value in modes.items(): new_subnet[mode] = value if expect_success: plugin._validate_subnet(ctx, new_subnet, cur_subnet) else: self.assertRaises(n_exc.InvalidInput, plugin._validate_subnet, ctx, new_subnet, cur_subnet) def test_create_subnet_ipv6_ra_modes(self): # Test all RA modes with no address mode specified for ra_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_ra_mode=ra_mode) def test_create_subnet_ipv6_addr_modes(self): # Test all address modes with no RA mode specified for addr_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_address_mode=addr_mode) def test_create_subnet_ipv6_same_ra_and_addr_modes(self): # Test all ipv6 modes with ra_mode==addr_mode for ipv6_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_ra_mode=ipv6_mode, ipv6_address_mode=ipv6_mode) def test_create_subnet_ipv6_different_ra_and_addr_modes(self): # Test all ipv6 modes with ra_mode!=addr_mode for ra_mode, addr_mode in itertools.permutations( 
constants.IPV6_MODES, 2): self._test_validate_subnet_ipv6_modes( expect_success=not (ra_mode and addr_mode), ipv6_ra_mode=ra_mode, ipv6_address_mode=addr_mode) def test_create_subnet_ipv6_out_of_cidr_global(self): gateway_ip = '2000::1' cidr = '2001::/64' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=6, ipv6_ra_mode=constants.DHCPV6_STATEFUL, ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_ipv6_out_of_cidr_lla(self): gateway_ip = 'fe80::1' cidr = '2001::/64' self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC) def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self): gateway_ip = 'fe80::1' cidr = 'fe80::/64' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: for mode in constants.IPV6_MODES: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, ip_version=6, enable_dhcp=False, ipv6_ra_mode=mode, ipv6_address_mode=mode) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_invalid_ipv6_ra_mode(self): gateway_ip = 'fe80::1' cidr = 'fe80::/80' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, ip_version=6, ipv6_ra_mode='foo', ipv6_address_mode='slaac') self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_invalid_ipv6_address_mode(self): gateway_ip = 'fe80::1' cidr = 'fe80::/80' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, ip_version=6, ipv6_ra_mode='slaac', ipv6_address_mode='baz') self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def 
test_create_subnet_ipv6_ra_mode_ip_version_4(self): cidr = '10.0.2.0/24' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr, ip_version=4, ipv6_ra_mode=constants.DHCPV6_STATEFUL) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def test_create_subnet_ipv6_address_mode_ip_version_4(self): cidr = '10.0.2.0/24' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet( cidr=cidr, ip_version=4, ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) def _test_create_subnet_ipv6_auto_addr_with_port_on_network( self, addr_mode, device_owner=DEVICE_OWNER_COMPUTE, insert_db_reference_error=False): # Create a network with one IPv4 subnet and one port with self.network() as network,\ self.subnet(network=network) as v4_subnet,\ self.port(subnet=v4_subnet, device_owner=device_owner) as port: if insert_db_reference_error: def db_ref_err_for_ipalloc(instance): if instance.__class__.__name__ == 'IPAllocation': raise db_exc.DBReferenceError( 'dummy_table', 'dummy_constraint', 'dummy_key', 'dummy_key_table') mock.patch.object(orm.Session, 'add', side_effect=db_ref_err_for_ipalloc).start() # Add an IPv6 auto-address subnet to the network v6_subnet = self._make_subnet(self.fmt, network, 'fe80::1', 'fe80::/64', ip_version=6, ipv6_ra_mode=addr_mode, ipv6_address_mode=addr_mode) if (insert_db_reference_error or device_owner == constants.DEVICE_OWNER_ROUTER_SNAT or device_owner in constants.ROUTER_INTERFACE_OWNERS): # DVR SNAT and router interfaces should not have been # updated with addresses from the new auto-address subnet self.assertEqual(1, len(port['port']['fixed_ips'])) else: # Confirm that the port has been updated with an address # from the new auto-address subnet req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, 
req.get_response(self.api)) fixed_ips = sport['port']['fixed_ips'] self.assertEqual(2, len(fixed_ips)) self.assertIn(v6_subnet['subnet']['id'], [fixed_ip['subnet_id'] for fixed_ip in fixed_ips]) def test_create_subnet_ipv6_slaac_with_port_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( constants.IPV6_SLAAC) def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( constants.DHCPV6_STATELESS) def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( constants.IPV6_SLAAC, device_owner=constants.DEVICE_OWNER_DHCP) def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( constants.IPV6_SLAAC, device_owner=constants.DEVICE_OWNER_ROUTER_INTF) def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( constants.IPV6_SLAAC, device_owner=constants.DEVICE_OWNER_ROUTER_SNAT) def test_create_subnet_ipv6_slaac_with_db_reference_error(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( constants.IPV6_SLAAC, insert_db_reference_error=True) def test_update_subnet_no_gateway(self): with self.subnet() as subnet: data = {'subnet': {'gateway_ip': '10.0.0.1'}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnet']['gateway_ip'], data['subnet']['gateway_ip']) data = {'subnet': {'gateway_ip': None}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertIsNone(data['subnet']['gateway_ip']) def test_update_subnet(self): with self.subnet() as subnet: data = {'subnet': {'gateway_ip': '10.0.0.1'}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) 
res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnet']['gateway_ip'], data['subnet']['gateway_ip']) def test_update_subnet_adding_additional_host_routes_and_dns(self): host_routes = [{'destination': '172.16.0.0/24', 'nexthop': '10.0.2.2'}] with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 4, 'dns_nameservers': ['192.168.0.1'], 'host_routes': host_routes, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, subnet_req.get_response(self.api)) host_routes = [{'destination': '172.16.0.0/24', 'nexthop': '10.0.2.2'}, {'destination': '192.168.0.0/24', 'nexthop': '10.0.2.3'}] dns_nameservers = ['192.168.0.1', '192.168.0.2'] data = {'subnet': {'host_routes': host_routes, 'dns_nameservers': dns_nameservers}} req = self.new_update_request('subnets', data, res['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(sorted(res['subnet']['host_routes']), sorted(host_routes)) self.assertEqual(sorted(res['subnet']['dns_nameservers']), sorted(dns_nameservers)) def test_update_subnet_shared_returns_400(self): with self.network(shared=True) as network: with self.subnet(network=network) as subnet: data = {'subnet': {'shared': True}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_gw_outside_cidr_returns_400(self): with self.network() as network: with self.subnet(network=network) as subnet: data = {'subnet': {'gateway_ip': '100.0.0.1'}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_gw_ip_in_use_returns_409(self): with self.network() as network: with self.subnet( 
network=network, allocation_pools=[{'start': '10.0.0.100', 'end': '10.0.0.253'}]) as subnet: subnet_data = subnet['subnet'] with self.port( subnet=subnet, fixed_ips=[{'subnet_id': subnet_data['id'], 'ip_address': subnet_data['gateway_ip']}]): data = {'subnet': {'gateway_ip': '10.0.0.99'}} req = self.new_update_request('subnets', data, subnet_data['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 409) def test_update_subnet_inconsistent_ipv4_gatewayv6(self): with self.network() as network: with self.subnet(network=network) as subnet: data = {'subnet': {'gateway_ip': 'fe80::1'}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_inconsistent_ipv6_gatewayv4(self): with self.network() as network: with self.subnet(network=network, ip_version=6, cidr='fe80::/48') as subnet: data = {'subnet': {'gateway_ip': '10.1.1.1'}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_inconsistent_ipv4_dns_v6(self): dns_nameservers = ['fe80::1'] with self.network() as network: with self.subnet(network=network) as subnet: data = {'subnet': {'dns_nameservers': dns_nameservers}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): host_routes = [{'destination': 'fe80::0/48', 'nexthop': '10.0.2.20'}] with self.network() as network: with self.subnet(network=network, ip_version=6, cidr='fe80::/48') as subnet: data = {'subnet': {'host_routes': host_routes}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 
webob.exc.HTTPClientError.code) def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): host_routes = [{'destination': '172.16.0.0/24', 'nexthop': 'fe80::1'}] with self.network() as network: with self.subnet(network=network, ip_version=6, cidr='fe80::/48') as subnet: data = {'subnet': {'host_routes': host_routes}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_gateway_in_allocation_pool_returns_409(self): allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] with self.network() as network: with self.subnet(network=network, allocation_pools=allocation_pools, cidr='10.0.0.0/24') as subnet: data = {'subnet': {'gateway_ip': '10.0.0.50'}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) def test_update_subnet_ipv6_attributes_fails(self): with self.subnet(ip_version=6, cidr='fe80::/64', ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL, 'ipv6_address_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_ipv6_ra_mode_fails(self): with self.subnet(ip_version=6, cidr='fe80::/64', ipv6_ra_mode=constants.IPV6_SLAAC) as subnet: data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_ipv6_address_mode_fails(self): with self.subnet(ip_version=6, cidr='fe80::/64', ipv6_address_mode=constants.IPV6_SLAAC) as subnet: data = {'subnet': 
{'ipv6_address_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_ipv6_cannot_disable_dhcp(self): with self.subnet(ip_version=6, cidr='fe80::/64', ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: data = {'subnet': {'enable_dhcp': False}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_ipv6_ra_mode_ip_version_4(self): with self.network() as network: with self.subnet(network=network) as subnet: data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_ipv6_address_mode_ip_version_4(self): with self.network() as network: with self.subnet(network=network) as subnet: data = {'subnet': {'ipv6_address_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_update_subnet_allocation_pools(self): """Test that we can successfully update with sane params. This will create a subnet with specified allocation_pools Then issue an update (PUT) to update these using correct (i.e. non erroneous) params. Finally retrieve the updated subnet and verify. 
""" allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}] with self.network() as network: with self.subnet(network=network, allocation_pools=allocation_pools, cidr='192.168.0.0/24') as subnet: data = {'subnet': {'allocation_pools': [ {'start': '192.168.0.10', 'end': '192.168.0.20'}, {'start': '192.168.0.30', 'end': '192.168.0.40'}]}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) #check res code but then do GET on subnet for verification res = req.get_response(self.api) self.assertEqual(res.status_code, 200) req = self.new_show_request('subnets', subnet['subnet']['id'], self.fmt) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(len(res['subnet']['allocation_pools']), 2) res_vals = res['subnet']['allocation_pools'][0].values() +\ res['subnet']['allocation_pools'][1].values() for pool_val in ['10', '20', '30', '40']: self.assertTrue('192.168.0.%s' % (pool_val) in res_vals) #updating alloc pool to something outside subnet.cidr def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self): """Test update alloc pool to something outside subnet.cidr. This makes sure that an erroneous allocation_pool specified in a subnet update (outside subnet cidr) will result in an error. 
"""
        allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='192.168.0.0/24') as subnet:
                data = {'subnet': {'allocation_pools': [
                    {'start': '10.0.0.10', 'end': '10.0.0.20'}]}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPClientError.code)

    def _test_subnet_update_enable_dhcp_no_ip_available_returns_409(
            self, allocation_pools, cidr):
        # Exhaust the allocation pool with one port, then try to enable
        # DHCP: no address is left for the DHCP port, so the update must
        # fail with 409 Conflict.
        ip_version = netaddr.IPNetwork(cidr).version
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             enable_dhcp=False,
                             cidr=cidr,
                             ip_version=ip_version) as subnet:
                # NOTE(review): 'id' shadows the builtin and actually holds
                # the network_id used to create the port.
                id = subnet['subnet']['network_id']
                self._create_port(self.fmt, id)
                data = {'subnet': {'enable_dhcp': True}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPConflict.code)

    def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv4(self):
        # /30 with a single-address pool: one port exhausts it.
        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.2'}]
        cidr = '10.0.0.0/30'
        self._test_subnet_update_enable_dhcp_no_ip_available_returns_409(
            allocation_pools, cidr)

    def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv6(self):
        allocation_pools = [{'start': '2001:db8::2', 'end': '2001:db8::2'}]
        cidr = '2001:db8::/126'
        self._test_subnet_update_enable_dhcp_no_ip_available_returns_409(
            allocation_pools, cidr)

    def test_show_subnet(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                req = self.new_show_request('subnets',
                                            subnet['subnet']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(res['subnet']['id'],
                                 subnet['subnet']['id'])
                self.assertEqual(res['subnet']['network_id'],
                                 network['network']['id'])

    # NOTE(review): contextlib.nested (used throughout the list tests below)
    # exists only on Python 2; it was removed in Python 3.
    def test_list_subnets(self):
        with self.network() as network:
            with contextlib.nested(self.subnet(network=network,
                                               gateway_ip='10.0.0.1',
                                               cidr='10.0.0.0/24'),
                                   self.subnet(network=network,
                                               gateway_ip='10.0.1.1',
                                               cidr='10.0.1.0/24'),
                                   self.subnet(network=network,
                                               gateway_ip='10.0.2.1',
                                               cidr='10.0.2.0/24')) as subnets:
                self._test_list_resources('subnet', subnets)

    def test_list_subnets_shared(self):
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
                    # normal user should see only 1 subnet
                    req = self.new_list_request('subnets')
                    req.environ['neutron.context'] = context.Context(
                        '', 'some_tenant')
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(len(res['subnets']), 1)
                    self.assertEqual(res['subnets'][0]['cidr'],
                                     subnet['subnet']['cidr'])
                    # admin will see both subnets
                    admin_req = self.new_list_request('subnets')
                    admin_res = self.deserialize(
                        self.fmt, admin_req.get_response(self.api))
                    self.assertEqual(len(admin_res['subnets']), 2)
                    cidrs = [sub['cidr'] for sub in admin_res['subnets']]
                    self.assertIn(subnet['subnet']['cidr'], cidrs)
                    self.assertIn(priv_subnet['subnet']['cidr'], cidrs)

    def test_list_subnets_with_parameter(self):
        with self.network() as network:
            with contextlib.nested(self.subnet(network=network,
                                               gateway_ip='10.0.0.1',
                                               cidr='10.0.0.0/24'),
                                   self.subnet(network=network,
                                               gateway_ip='10.0.1.1',
                                               cidr='10.0.1.0/24')
                                   ) as subnets:
                query_params = 'ip_version=4&ip_version=6'
                self._test_list_resources('subnet', subnets,
                                          query_params=query_params)
                query_params = 'ip_version=6'
                self._test_list_resources('subnet', [],
                                          query_params=query_params)

    def test_list_subnets_with_sort_native(self):
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        with contextlib.nested(self.subnet(enable_dhcp=True,
                                           cidr='10.0.0.0/24'),
                               self.subnet(enable_dhcp=False,
                                           cidr='11.0.0.0/24'),
                               self.subnet(enable_dhcp=False,
                                           cidr='12.0.0.0/24')
                               ) as (subnet1, subnet2, subnet3):
            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
                                      [('enable_dhcp', 'asc'),
                                       ('cidr', 'desc')])

    def test_list_subnets_with_sort_emulated(self):
        # NOTE(review): the patcher is started but never stopped in this
        # method - presumably test-framework cleanup handles it; confirm.
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_sorting_helper',
            new=_fake_get_sorting_helper)
        helper_patcher.start()
        with contextlib.nested(self.subnet(enable_dhcp=True,
                                           cidr='10.0.0.0/24'),
                               self.subnet(enable_dhcp=False,
                                           cidr='11.0.0.0/24'),
                               self.subnet(enable_dhcp=False,
                                           cidr='12.0.0.0/24')
                               ) as (subnet1, subnet2, subnet3):
            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
                                      [('enable_dhcp', 'asc'),
                                       ('cidr', 'desc')])

    def test_list_subnets_with_pagination_native(self):
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented sorting feature")
        with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
                               self.subnet(cidr='11.0.0.0/24'),
                               self.subnet(cidr='12.0.0.0/24')
                               ) as (subnet1, subnet2, subnet3):
            self._test_list_with_pagination('subnet',
                                            (subnet1, subnet2, subnet3),
                                            ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
                               self.subnet(cidr='11.0.0.0/24'),
                               self.subnet(cidr='12.0.0.0/24')
                               ) as (subnet1, subnet2, subnet3):
            self._test_list_with_pagination('subnet',
                                            (subnet1, subnet2, subnet3),
                                            ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_reverse_native(self):
        # NOTE(review): this checks the *sorting* skip flag rather than
        # _skip_native_pagination - confirm that is intended.
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
                               self.subnet(cidr='11.0.0.0/24'),
                               self.subnet(cidr='12.0.0.0/24')
                               ) as (subnet1, subnet2, subnet3):
            self._test_list_with_pagination_reverse('subnet',
                                                    (subnet1, subnet2,
                                                     subnet3),
                                                    ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_reverse_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
                               self.subnet(cidr='11.0.0.0/24'),
                               self.subnet(cidr='12.0.0.0/24')
                               ) as (subnet1, subnet2, subnet3):
            self._test_list_with_pagination_reverse('subnet',
                                                    (subnet1, subnet2,
                                                     subnet3),
                                                    ('cidr', 'asc'), 2, 2)

    def test_invalid_ip_version(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 7,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_invalid_subnet(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': 'invalid',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_invalid_ip_address(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': 'ipaddress'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_invalid_uuid(self):
        with self.network() as network:
            data = {'subnet': {'network_id': 'invalid-uuid',
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_create_subnet_with_one_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        dns_nameservers = ['1.2.3.4']
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 dns_nameservers=dns_nameservers)

    def test_create_subnet_with_two_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        dns_nameservers = ['1.2.3.4', '4.3.2.1']
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 dns_nameservers=dns_nameservers)

    def test_create_subnet_with_too_many_dns(self):
        with self.network() as network:
            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'dns_nameservers': dns_list}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_create_subnet_with_one_host_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 host_routes=host_routes)

    def test_create_subnet_with_two_host_routes(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'},
                       {'destination': '12.0.0.0/8',
                        'nexthop': '4.3.2.1'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 host_routes=host_routes)

    def test_create_subnet_with_too_many_routes(self):
        with self.network() as network:
            host_routes = [{'destination': '135.207.0.0/16',
                            'nexthop': '1.2.3.4'},
                           {'destination': '12.0.0.0/8',
                            'nexthop': '4.3.2.1'},
                           {'destination': '141.212.0.0/16',
                            'nexthop': '2.2.2.2'}]
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'host_routes': host_routes}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_update_subnet_dns(self):
        with self.subnet() as subnet:
            data = {'subnet': {'dns_nameservers': ['11.0.0.1']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['subnet']['dns_nameservers'],
                             data['subnet']['dns_nameservers'])

    def test_update_subnet_dns_to_None(self):
        # Updating dns_nameservers to None clears the list; a subsequent
        # update may set new entries.
        with self.subnet(dns_nameservers=['11.0.0.1']) as subnet:
            data = {'subnet': {'dns_nameservers': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['dns_nameservers'])
            data = {'subnet': {'dns_nameservers': ['11.0.0.3']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])

    def test_update_subnet_dns_with_too_many_entries(self):
        with self.subnet() as subnet:
            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
            data = {'subnet': {'dns_nameservers': dns_list}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_update_subnet_route(self):
        with self.subnet() as subnet:
            data = {'subnet': {'host_routes':
                    [{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['subnet']['host_routes'],
                             data['subnet']['host_routes'])

    def test_update_subnet_route_to_None(self):
        # Updating host_routes to None clears the list; a subsequent update
        # may set new entries.
        with self.subnet(host_routes=[{'destination': '12.0.0.0/8',
                                       'nexthop': '1.2.3.4'}]) as subnet:
            data = {'subnet': {'host_routes': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['host_routes'])
            data = {'subnet': {'host_routes': [{'destination': '12.0.0.0/8',
                                                'nexthop': '1.2.3.4'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['host_routes'],
                             res['subnet']['host_routes'])

    def test_update_subnet_route_with_too_many_entries(self):
        with self.subnet() as subnet:
            data = {'subnet': {'host_routes': [
                    {'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'},
                    {'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'},
                    {'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)

    def test_delete_subnet_with_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        dns_nameservers = ['1.2.3.4']
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
                                   dns_nameservers=dns_nameservers)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)

    def test_delete_subnet_with_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
                                   host_routes=host_routes)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)

    def test_delete_subnet_with_dns_and_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        dns_nameservers = ['1.2.3.4']
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
                                   dns_nameservers=dns_nameservers,
                                   host_routes=host_routes)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)

    def _helper_test_validate_subnet(self, option, exception):
        # Force the limit named by 'option' down to zero, then check that
        # _validate_subnet raises 'exception' and that its message does not
        # leak the repr of the builtin 'id' function.
        cfg.CONF.set_override(option, 0)
        with self.network() as network:
            subnet = {'network_id': network['network']['id'],
                      'cidr': '10.0.2.0/24',
                      'ip_version': 4,
                      'tenant_id': network['network']['tenant_id'],
                      'gateway_ip': '10.0.2.1',
                      'dns_nameservers': ['8.8.8.8'],
                      'host_routes': [{'destination': '135.207.0.0/16',
                                       'nexthop': '1.2.3.4'}]}
            plugin = manager.NeutronManager.get_plugin()
            e = self.assertRaises(exception,
                                  plugin._validate_subnet,
                                  context.get_admin_context(
                                      load_admin_roles=False),
                                  subnet)
            self.assertThat(
                str(e),
                matchers.Not(matchers.Contains('built-in function id')))

    def test_validate_subnet_dns_nameservers_exhausted(self):
        self._helper_test_validate_subnet(
            'max_dns_nameservers',
            n_exc.DNSNameServersExhausted)

    def test_validate_subnet_host_routes_exhausted(self):
        self._helper_test_validate_subnet(
            'max_subnet_host_routes',
            n_exc.HostRoutesExhausted)

    def test_port_prevents_network_deletion(self):
        with self.port() as p:
            self._delete('networks', p['port']['network_id'],
                         expected_code=webob.exc.HTTPConflict.code)

    def test_port_prevents_subnet_deletion(self):
        with self.port() as p:
            self._delete('subnets',
                         p['port']['fixed_ips'][0]['subnet_id'],
                         expected_code=webob.exc.HTTPConflict.code)


class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
    """CRUD tests for the subnetpool resource."""

    _POOL_NAME = 'test-pool'

    def _test_create_subnetpool(self, prefixes, expected=None,
                                admin=False, **kwargs):
        # Create a subnetpool, validate the stored resource against the
        # request keys and optionally against 'expected'; return the pool.
        keys = kwargs.copy()
        keys.setdefault('tenant_id', self._tenant_id)
        with self.subnetpool(prefixes, admin, **keys) as subnetpool:
            self._validate_resource(subnetpool, keys, 'subnetpool')
            if expected:
                self._compare_resource(subnetpool, expected, 'subnetpool')
        return subnetpool

    def _validate_default_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['default_prefixlen'],
                         prefix)

    def _validate_min_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['min_prefixlen'], prefix)

    def _validate_max_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['max_prefixlen'], prefix)

    def test_create_subnetpool_empty_prefix_list(self):
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [],
                          name=self._POOL_NAME,
                          tenant_id=self._tenant_id,
                          min_prefixlen='21')

    def test_create_subnetpool_ipv4_24_with_defaults(self):
        subnet = netaddr.IPNetwork('10.10.10.0/24')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  name=self._POOL_NAME,
                                                  tenant_id=self._tenant_id,
                                                  min_prefixlen='21')
        self._validate_default_prefix('21', subnetpool)
        self._validate_min_prefix('21', subnetpool)

    def test_create_subnetpool_ipv4_21_with_defaults(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  name=self._POOL_NAME,
                                                  tenant_id=self._tenant_id,
                                                  min_prefixlen='21')
        self._validate_default_prefix('21', subnetpool)
        self._validate_min_prefix('21', subnetpool)

    def test_create_subnetpool_ipv4_default_prefix_too_small(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          min_prefixlen='21',
                          default_prefixlen='20')

    def test_create_subnetpool_ipv4_default_prefix_too_large(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          max_prefixlen=24,
                          default_prefixlen='32')

    def test_create_subnetpool_ipv4_default_prefix_bounds(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME)
        self._validate_min_prefix('8', subnetpool)
        self._validate_default_prefix('8', subnetpool)
        self._validate_max_prefix('32', subnetpool)

    def test_create_subnetpool_ipv6_default_prefix_bounds(self):
        subnet = netaddr.IPNetwork('fe80::/48')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME)
        self._validate_min_prefix('64', subnetpool)
        self._validate_default_prefix('64', subnetpool)
        self._validate_max_prefix('128', subnetpool)

    def test_create_subnetpool_ipv4_supported_default_prefix(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='21',
                                                  default_prefixlen='26')
        self._validate_default_prefix('26', subnetpool)

    def test_create_subnetpool_ipv4_supported_min_prefix(self):
        subnet = netaddr.IPNetwork('10.10.10.0/24')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='26')
        self._validate_min_prefix('26', subnetpool)
        self._validate_default_prefix('26', subnetpool)

    def test_create_subnetpool_ipv4_default_prefix_smaller_than_min(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          default_prefixlen='22',
                          min_prefixlen='23')

    def test_create_subnetpool_mixed_ip_version(self):
        subnet_v4 = netaddr.IPNetwork('10.10.10.0/21')
        subnet_v6 = netaddr.IPNetwork('fe80::/48')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet_v4.cidr, subnet_v6.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          min_prefixlen='21')

    def test_create_subnetpool_ipv6_with_defaults(self):
        subnet = netaddr.IPNetwork('fe80::/48')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='48')
        self._validate_default_prefix('48', subnetpool)
        self._validate_min_prefix('48', subnetpool)

    def test_get_subnetpool(self):
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24')
        req = self.new_show_request('subnetpools',
                                    subnetpool['subnetpool']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        self.assertEqual(subnetpool['subnetpool']['id'],
                         res['subnetpool']['id'])

    def test_get_subnetpool_different_tenants_not_shared(self):
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  shared=False,
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24')
        req = self.new_show_request('subnetpools',
                                    subnetpool['subnetpool']['id'])
        neutron_context = context.Context('', 'not-the-owner')
        req.environ['neutron.context'] = neutron_context
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 404)

    def test_get_subnetpool_different_tenants_shared(self):
        # Positional args map to expected=None, admin=True.
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  None,
                                                  True,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24',
                                                  shared=True)
        req = self.new_show_request('subnetpools',
                                    subnetpool['subnetpool']['id'])
        neutron_context = context.Context('', self._tenant_id)
        req.environ['neutron.context'] = neutron_context
        res = self.deserialize(self.fmt, req.get_response(self.api))
        self.assertEqual(subnetpool['subnetpool']['id'],
                         res['subnetpool']['id'])

    def test_list_subnetpools_different_tenants_shared(self):
        self._test_create_subnetpool(['10.10.10.0/24'],
                                     None,
                                     True,
                                     name=self._POOL_NAME,
                                     min_prefixlen='24',
                                     shared=True)
        admin_res = self._list('subnetpools')
        mortal_res = self._list(
            'subnetpools',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(len(admin_res['subnetpools']), 1)
        self.assertEqual(len(mortal_res['subnetpools']), 1)

    def test_list_subnetpools_different_tenants_not_shared(self):
        self._test_create_subnetpool(['10.10.10.0/24'],
                                     None,
                                     True,
                                     name=self._POOL_NAME,
                                     min_prefixlen='24',
                                     shared=False)
        admin_res = self._list('subnetpools')
        mortal_res = self._list(
            'subnetpools',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(len(admin_res['subnetpools']), 1)
        self.assertEqual(len(mortal_res['subnetpools']), 0)

    def test_delete_subnetpool(self):
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24')
        req = self.new_delete_request('subnetpools',
                                      subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)

    def test_delete_nonexistent_subnetpool(self):
        req = self.new_delete_request(
            'subnetpools', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
        res = req.get_response(self._api_for_resource('subnetpools'))
        self.assertEqual(res.status_int, 404)

    def test_update_subnetpool_prefix_list_append(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.8.0/21'],
            tenant_id=self._tenant_id,
            name=self._POOL_NAME,
            min_prefixlen='24')
        data = {'subnetpool': {'prefixes': ['10.10.8.0/21', '3.3.3.0/24',
                                            '2.2.2.0/24']}}
        req = self.new_update_request('subnetpools', data,
                                      initial_subnetpool['subnetpool']['id'])
        api = self._api_for_resource('subnetpools')
        res = self.deserialize(self.fmt, req.get_response(api))
        self.assertItemsEqual(res['subnetpool']['prefixes'],
                              ['10.10.8.0/21', '3.3.3.0/24', '2.2.2.0/24'])
def test_update_subnetpool_prefix_list_compaction(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'prefixes': ['10.10.10.0/24', '10.10.11.0/24']}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self.assertItemsEqual(res['subnetpool']['prefixes'], ['10.10.10.0/23']) def test_illegal_subnetpool_prefix_list_update(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'prefixes': ['10.10.11.0/24']}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = req.get_response(api) self.assertEqual(res.status_int, 400) def test_update_subnetpool_default_prefix(self): initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'default_prefixlen': '26'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self.assertEqual(res['subnetpool']['default_prefixlen'], 26) def test_update_subnetpool_min_prefix(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'min_prefixlen': '21'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnetpool']['min_prefixlen'], 21) def test_update_subnetpool_min_prefix_larger_than_max(self): initial_subnetpool = 
self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24') data = {'subnetpool': {'min_prefixlen': '28'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_update_subnetpool_max_prefix(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24') data = {'subnetpool': {'max_prefixlen': '26'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnetpool']['max_prefixlen'], 26) def test_update_subnetpool_max_prefix_less_than_min(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'max_prefixlen': '21'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_update_subnetpool_max_prefix_less_than_default(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', default_prefixlen='24') data = {'subnetpool': {'max_prefixlen': '22'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_update_subnetpool_default_prefix_less_than_min(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnetpool': {'default_prefixlen': '20'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = 
req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_update_subnetpool_default_prefix_larger_than_max(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24') data = {'subnetpool': {'default_prefixlen': '28'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_update_subnetpool_prefix_list_mixed_ip_version(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'prefixes': ['fe80::/48']}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_update_subnetpool_default_quota(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24', default_quota=10) self.assertEqual(initial_subnetpool['subnetpool']['default_quota'], 10) data = {'subnetpool': {'default_quota': '1'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnetpool']['default_quota'], 1) def test_allocate_any_subnet_with_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a subnet allocation (no CIDR) data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': 24, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) subnet = 
netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(subnet.prefixlen, 24) # Assert the allocated subnet CIDR is a subnet of our pool prefix supernet = netaddr.smallest_matching_cidr( subnet, sp['subnetpool']['prefixes']) self.assertEqual(supernet, netaddr.IPNetwork('10.10.0.0/16')) def test_allocate_any_subnet_with_default_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request any subnet allocation using default prefix data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) subnet = netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(subnet.prefixlen, int(sp['subnetpool']['default_prefixlen'])) def test_allocate_specific_subnet_with_mismatch_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'prefixlen': 26, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_allocate_specific_subnet_with_matching_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'prefixlen': 24, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) 
self.assertEqual(res.status_int, 400) def test_allocate_specific_subnet(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) # Assert the allocated subnet CIDR is what we expect subnet = netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(subnet, netaddr.IPNetwork('10.10.1.0/24')) def test_allocate_specific_subnet_non_existent_prefix(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '192.168.1.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(res.status_int, 500) def test_allocate_specific_subnet_already_allocated(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.10.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) # Allocate the subnet res = req.get_response(self.api) self.assertEqual(res.status_int, 201) # Attempt to allocate it again res = req.get_response(self.api) # Assert error self.assertEqual(res.status_int, 500) def 
test_allocate_specific_subnet_prefix_too_small(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/20', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_allocate_specific_subnet_prefix_specific_gw(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'gateway_ip': '10.10.1.254', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnet']['gateway_ip'], '10.10.1.254') def test_allocate_specific_subnet_prefix_allocation_pools(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation pools = [{'start': '10.10.1.2', 'end': '10.10.1.253'}] data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'gateway_ip': '10.10.1.1', 'ip_version': 4, 'allocation_pools': pools, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnet']['allocation_pools'][0]['start'], pools[0]['start']) self.assertEqual(res['subnet']['allocation_pools'][0]['end'], 
pools[0]['end']) def test_allocate_any_subnet_prefix_allocation_pools(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request an any subnet allocation pools = [{'start': '10.10.10.1', 'end': '10.10.10.254'}] data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': '24', 'ip_version': 4, 'allocation_pools': pools, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_allocate_specific_subnet_prefix_too_large(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_delete_subnetpool_existing_allocations(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) req.get_response(self.api) req = self.new_delete_request('subnetpools', sp['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) def test_allocate_subnet_over_quota(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, 
name=self._POOL_NAME, min_prefixlen='21', default_quota=2048) # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': 4, 'prefixlen': 21, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) # Allocate a subnet to fill the quota res = req.get_response(self.api) self.assertEqual(res.status_int, 201) # Attempt to allocate a /21 again res = req.get_response(self.api) # Assert error self.assertEqual(res.status_int, 409) def test_allocate_any_ipv4_subnet_ipv6_pool(self): with self.network() as network: sp = self._test_create_subnetpool(['2001:db8:1:2::/63'], tenant_id=self._tenant_id, name=self._POOL_NAME) # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) class DbModelTestCase(base.BaseTestCase): """DB model tests.""" def test_repr(self): """testing the string representation of 'model' classes.""" network = models_v2.Network(name="net_net", status="OK", admin_state_up=True) actual_repr_output = repr(network) exp_start_with = "<neutron.db.models_v2.Network" exp_middle = "[object at %x]" % id(network) exp_end_with = (" {tenant_id=None, id=None, " "name='net_net', status='OK', " "admin_state_up=True, shared=None, " "mtu=None, vlan_transparent=None}>") final_exp = exp_start_with + exp_middle + exp_end_with self.assertEqual(actual_repr_output, final_exp) class TestNeutronDbPluginV2(base.BaseTestCase): """Unit Tests for NeutronDbPluginV2 IPAM Logic.""" def test_generate_ip(self): with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, '_try_generate_ip') as generate: with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, '_rebuild_availability_ranges') as rebuild: 
db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's') generate.assert_called_once_with('c', 's') self.assertEqual(0, rebuild.call_count) def test_generate_ip_exhausted_pool(self): with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, '_try_generate_ip') as generate: with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, '_rebuild_availability_ranges') as rebuild: exception = n_exc.IpAddressGenerationFailure(net_id='n') # fail first call but not second generate.side_effect = [exception, None] db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's') self.assertEqual(2, generate.call_count) rebuild.assert_called_once_with('c', 's') def _validate_rebuild_availability_ranges(self, pools, allocations, expected): ip_qry = mock.Mock() ip_qry.with_lockmode.return_value = ip_qry ip_qry.filter_by.return_value = allocations pool_qry = mock.Mock() pool_qry.options.return_value = pool_qry pool_qry.with_lockmode.return_value = pool_qry pool_qry.filter_by.return_value = pools def return_queries_side_effect(*args, **kwargs): if args[0] == models_v2.IPAllocation: return ip_qry if args[0] == models_v2.IPAllocationPool: return pool_qry context = mock.Mock() context.session.query.side_effect = return_queries_side_effect subnets = [mock.MagicMock()] db_base_plugin_v2.NeutronDbPluginV2._rebuild_availability_ranges( context, subnets) actual = [[args[0].allocation_pool_id, args[0].first_ip, args[0].last_ip] for _name, args, _kwargs in context.session.add.mock_calls] self.assertEqual(expected, actual) def test_rebuild_availability_ranges(self): pools = [{'id': 'a', 'first_ip': '192.168.1.3', 'last_ip': '192.168.1.10'}, {'id': 'b', 'first_ip': '192.168.1.100', 'last_ip': '192.168.1.120'}] allocations = [{'ip_address': '192.168.1.3'}, {'ip_address': '192.168.1.78'}, {'ip_address': '192.168.1.7'}, {'ip_address': '192.168.1.110'}, {'ip_address': '192.168.1.11'}, {'ip_address': '192.168.1.4'}, {'ip_address': '192.168.1.111'}] expected = [['a', '192.168.1.5', '192.168.1.6'], 
['a', '192.168.1.8', '192.168.1.10'], ['b', '192.168.1.100', '192.168.1.109'], ['b', '192.168.1.112', '192.168.1.120']] self._validate_rebuild_availability_ranges(pools, allocations, expected) def test_rebuild_ipv6_availability_ranges(self): pools = [{'id': 'a', 'first_ip': '2001::1', 'last_ip': '2001::50'}, {'id': 'b', 'first_ip': '2001::100', 'last_ip': '2001::ffff:ffff:ffff:fffe'}] allocations = [{'ip_address': '2001::10'}, {'ip_address': '2001::45'}, {'ip_address': '2001::60'}, {'ip_address': '2001::111'}, {'ip_address': '2001::200'}, {'ip_address': '2001::ffff:ffff:ffff:ff10'}, {'ip_address': '2001::ffff:ffff:ffff:f2f0'}] expected = [['a', '2001::1', '2001::f'], ['a', '2001::11', '2001::44'], ['a', '2001::46', '2001::50'], ['b', '2001::100', '2001::110'], ['b', '2001::112', '2001::1ff'], ['b', '2001::201', '2001::ffff:ffff:ffff:f2ef'], ['b', '2001::ffff:ffff:ffff:f2f1', '2001::ffff:ffff:ffff:ff0f'], ['b', '2001::ffff:ffff:ffff:ff11', '2001::ffff:ffff:ffff:fffe']] self._validate_rebuild_availability_ranges(pools, allocations, expected) def _test__allocate_ips_for_port(self, subnets, port, expected): plugin = db_base_plugin_v2.NeutronDbPluginV2() with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_subnets') as get_subnets: with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, '_check_unique_ip') as check_unique: context = mock.Mock() get_subnets.return_value = subnets check_unique.return_value = True actual = plugin._allocate_ips_for_port(context, port) self.assertEqual(expected, actual) def test__allocate_ips_for_port_2_slaac_subnets(self): subnets = [ { 'cidr': u'2001:100::/64', 'enable_dhcp': True, 'gateway_ip': u'2001:100::1', 'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13', 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': u'slaac'}, { 'cidr': u'2001:200::/64', 'enable_dhcp': True, 'gateway_ip': u'2001:200::1', 'id': u'dc813d3d-ed66-4184-8570-7325c8195e28', 'network_id': 
'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': u'slaac'}] port = {'port': { 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'mac_address': '12:34:56:78:44:ab', 'device_owner': 'compute'}} expected = [] for subnet in subnets: addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( subnet['cidr'], port['port']['mac_address'])) expected.append({'ip_address': addr, 'subnet_id': subnet['id']}) self._test__allocate_ips_for_port(subnets, port, expected) class NeutronDbPluginV2AsMixinTestCase(testlib_api.SqlTestCase): """Tests for NeutronDbPluginV2 as Mixin. While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as a complete plugin, this test case verifies abilities of NeutronDbPlugin which are provided to other plugins (e.g. DB operations). This test case may include tests only for NeutronDbPlugin, so this should not be used in unit tests for other plugins. """ def setUp(self): super(NeutronDbPluginV2AsMixinTestCase, self).setUp() self.plugin = importutils.import_object(DB_PLUGIN_KLASS) self.context = context.get_admin_context() self.net_data = {'network': {'id': 'fake-id', 'name': 'net1', 'admin_state_up': True, 'tenant_id': 'test-tenant', 'shared': False}} def test_create_network_with_default_status(self): net = self.plugin.create_network(self.context, self.net_data) default_net_create_status = 'ACTIVE' expected = [('id', 'fake-id'), ('name', 'net1'), ('admin_state_up', True), ('tenant_id', 'test-tenant'), ('shared', False), ('status', default_net_create_status)] for k, v in expected: self.assertEqual(net[k], v) def test_create_network_with_status_BUILD(self): self.net_data['network']['status'] = 'BUILD' net = self.plugin.create_network(self.context, self.net_data) self.assertEqual(net['status'], 'BUILD') class TestNetworks(testlib_api.SqlTestCase): def setUp(self): super(TestNetworks, self).setUp() self._tenant_id = 'test-tenant' # Update the plugin 
self.setup_coreplugin(DB_PLUGIN_KLASS) def _create_network(self, plugin, ctx, shared=True): network = {'network': {'name': 'net', 'shared': shared, 'admin_state_up': True, 'tenant_id': self._tenant_id}} created_network = plugin.create_network(ctx, network) return (network, created_network['id']) def _create_port(self, plugin, ctx, net_id, device_owner, tenant_id): port = {'port': {'name': 'port', 'network_id': net_id, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'device_id': 'device_id', 'device_owner': device_owner, 'tenant_id': tenant_id}} plugin.create_port(ctx, port) def _test_update_shared_net_used(self, device_owner, expected_exception=None): plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() network, net_id = self._create_network(plugin, ctx) self._create_port(plugin, ctx, net_id, device_owner, self._tenant_id + '1') network['network']['shared'] = False if (expected_exception): with testlib_api.ExpectedException(expected_exception): plugin.update_network(ctx, net_id, network) else: plugin.update_network(ctx, net_id, network) def test_update_shared_net_used_fails(self): self._test_update_shared_net_used('', n_exc.InvalidSharedSetting) def test_update_shared_net_used_as_router_gateway(self): self._test_update_shared_net_used( constants.DEVICE_OWNER_ROUTER_GW) def test_update_shared_net_used_by_floating_ip(self): self._test_update_shared_net_used( constants.DEVICE_OWNER_FLOATINGIP)
apache-2.0
eltonsantos/django
tests/utils_tests/test_decorators.py
115
3920
from django.http import HttpResponse from django.template import Template, Context from django.template.response import TemplateResponse from django.test import TestCase, RequestFactory from django.utils.decorators import decorator_from_middleware class ProcessViewMiddleware(object): def process_view(self, request, view_func, view_args, view_kwargs): pass process_view_dec = decorator_from_middleware(ProcessViewMiddleware) @process_view_dec def process_view(request): return HttpResponse() class ClassProcessView(object): def __call__(self, request): return HttpResponse() class_process_view = process_view_dec(ClassProcessView()) class FullMiddleware(object): def process_request(self, request): request.process_request_reached = True def process_view(sef, request, view_func, view_args, view_kwargs): request.process_view_reached = True def process_template_response(self, request, response): request.process_template_response_reached = True return response def process_response(self, request, response): # This should never receive unrendered content. request.process_response_content = response.content request.process_response_reached = True return response full_dec = decorator_from_middleware(FullMiddleware) class DecoratorFromMiddlewareTests(TestCase): """ Tests for view decorators created using ``django.utils.decorators.decorator_from_middleware``. """ rf = RequestFactory() def test_process_view_middleware(self): """ Test a middleware that implements process_view. """ process_view(self.rf.get('/')) def test_callable_process_view_middleware(self): """ Test a middleware that implements process_view, operating on a callable class. 
""" class_process_view(self.rf.get('/')) def test_full_dec_normal(self): """ Test that all methods of middleware are called for normal HttpResponses """ @full_dec def normal_view(request): t = Template("Hello world") return HttpResponse(t.render(Context({}))) request = self.rf.get('/') response = normal_view(request) self.assertTrue(getattr(request, 'process_request_reached', False)) self.assertTrue(getattr(request, 'process_view_reached', False)) # process_template_response must not be called for HttpResponse self.assertFalse(getattr(request, 'process_template_response_reached', False)) self.assertTrue(getattr(request, 'process_response_reached', False)) def test_full_dec_templateresponse(self): """ Test that all methods of middleware are called for TemplateResponses in the right sequence. """ @full_dec def template_response_view(request): t = Template("Hello world") return TemplateResponse(request, t, {}) request = self.rf.get('/') response = template_response_view(request) self.assertTrue(getattr(request, 'process_request_reached', False)) self.assertTrue(getattr(request, 'process_view_reached', False)) self.assertTrue(getattr(request, 'process_template_response_reached', False)) # response must not be rendered yet. self.assertFalse(response._is_rendered) # process_response must not be called until after response is rendered, # otherwise some decorators like csrf_protect and gzip_page will not # work correctly. See #16004 self.assertFalse(getattr(request, 'process_response_reached', False)) response.render() self.assertTrue(getattr(request, 'process_response_reached', False)) # Check that process_response saw the rendered content self.assertEqual(request.process_response_content, b"Hello world")
bsd-3-clause
mgeorgehansen/FIFE_Technomage
tests/analyzers/dep_analyzer.py
1
7256
#!/usr/bin/env python # -*- coding: utf-8 -*- # #################################################################### # Copyright (C) 2005-2009 by the FIFE team # http://www.fifengine.de # This file is part of FIFE. # # FIFE is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # #################################################################### import sys, re, os try: from os.shutil import copyfile except ImportError: def copyfile(src,dest): srcf = open(src) destf = open(dest,"w") destf.write( srcf.read() ) destf.close() srcf.close() if '.' 
not in sys.path: sys.path.append('.') from utils.util_scripts.path import path from _allowed_dependencies import ALLOWED_MODULE_DEPS _S = os.path.sep ROOTDIRNAME = 'engine%score' % _S FILE_DEPS_OUT = 'doc%sdependencies%sfiledeps' % (_S, _S) DIR_DEPS_OUT = 'doc%sdependencies%sdirdeps' % (_S, _S) MODULE_DEPS_OUT = 'doc%sdependencies%smoduledeps' % (_S, _S) SKIPPED_PROVIDERS = [] SKIPPED_USERS = [] reInc = re.compile('#include "(.*?)"') def add_to_dictset(d, key, val): try: d[key].add(val) except KeyError: d[key] = set([val]) # return error string in case of possible error unknown_modules = [] def check_dep_error(allowed_deps, user, provider): global unknown_modules msg = '' try: if not provider in allowed_deps[user]: msg = 'Illegal dependency between %s -> %s, % s can use only:\n' % (user, provider, user) for d in allowed_deps[user]: msg += ' %s\n' % d except KeyError: print unknown_modules if user not in unknown_modules: msg = 'Unknown module %s found in static check\n' % user msg += ' please adjust dep_analyzer script to match new structure' unknown_modules.append(user) return msg def get_file2inc(sources): file2inc = {} for f in sources: inComment = False for line in open(f): if not inComment and line.find('/*') != -1: inComment = True continue elif inComment: if line.find('*/') != -1: inComment = False continue elif line.strip().startswith('//'): continue m = reInc.search(line) if m: add_to_dictset(file2inc, f, m.group(1).replace( '/', _S )) return file2inc def fill_dep_infos(file2inc, fileUser2provider, dirUser2provider, moduleUser2provider, unknownIncludes, dirclusters): for f, incs in file2inc.items(): #if f.find('engine.cpp') != -1: # import pdb; pdb.set_trace() skip = False for user in SKIPPED_USERS: if f.find(user) != -1: skip = True break if skip: continue for i in incs: user = str(f.dirname()).replace(ROOTDIRNAME + _S, '') header = path((f.dirname() / path(i)).abspath().split(ROOTDIRNAME + _S)[1]) if not header.isfile(): header = path(ROOTDIRNAME) + _S + 
path(i) if not header.isfile(): add_to_dictset(unknownIncludes, str(f), str(i)) continue provider = str(header.dirname()).replace(ROOTDIRNAME + _S, '') skip = False for skipped in SKIPPED_PROVIDERS: if header.find(skipped) != -1: skip = True break if skip: continue add_to_dictset(dirUser2provider, user, provider) usermodule = user.split(_S)[0] providermodule = provider.split(_S)[0] userfile = user.split(_S)[-1].split('.')[0] providerfile = provider.split(_S)[-1].split('.')[0] add_to_dictset(dirclusters, usermodule, user) add_to_dictset(dirclusters, providermodule, provider) add_to_dictset(moduleUser2provider, usermodule, providermodule) add_to_dictset(fileUser2provider, userfile, providerfile) def write_dot_file(fname, contents): lines = [] a = lines.append a('digraph "source tree" {') a(' overlap=scale;') a(' size="8,10";') a(' ratio="fill";') a(' fontsize="16";') a(' fontname="Helvetica";') a(' clusterrank="local";') if type(contents) in (list, tuple): lines += contents else: lines.append(contents) a('}') open(fname, 'w').write('\n'.join(lines)) def get_cluster_str(ind, elements, label): lines = [] a = lines.append a('subgraph cluster_%d {' % ind) a(' style=filled;') a(' color=lightgrey;') a(' node [style=filled,color=white];') a(' %s' % '; '.join('"%s"' % d for d in elements)) a(' label = "%s";' % label) a('}') return '\n'.join(lines) def run_dot(basename,type): dotname = basename + ".dot" outname = basename + "." 
+ type dotchanged = True try: olddot = open(dotname + "~").read() dotchanged = olddot != open(dotname).read() dotchanged = dotchanged or not os.path.exists(outname) except IOError: pass if not dotchanged: return print "Generating: ",outname cmd = 'dot -T%(type)s %(dotname)s > %(outname)s' % locals() os.system(cmd) copyfile(dotname,dotname + "~") def analyze(write_postscript=False): root = path(ROOTDIRNAME) headers = list(root.walkfiles('*.h')) sources = headers + list(root.walkfiles('*.cpp')) file2inc = get_file2inc(sources) moduleUser2provider = {} dirUser2provider = {} fileUser2provider = {} unknownIncludes = {} dirclusters = {} fill_dep_infos(file2inc, fileUser2provider, dirUser2provider, moduleUser2provider, unknownIncludes, dirclusters) # write module dep graph out = [] illegalModuleDeps = [] for user, providers in sorted(moduleUser2provider.items()): for provider in sorted(providers): if user != provider: out.append(' "' + user + '" -> "' + provider + '"') msg = check_dep_error(ALLOWED_MODULE_DEPS, user, provider) if msg: illegalModuleDeps.append(msg) write_dot_file('%s.dot' % MODULE_DEPS_OUT, out) if write_postscript: run_dot(MODULE_DEPS_OUT, "ps") run_dot(MODULE_DEPS_OUT,"png") # write dir dep graph out = [] for cluster, subdirs in sorted(dirclusters.items()): out.append(get_cluster_str(len(out), sorted(subdirs), cluster)) for user, providers in sorted(dirUser2provider.items()): for provider in sorted(providers): if user != provider: out.append(' "' + user + '" -> "' + provider + '"') write_dot_file('%s.dot' % DIR_DEPS_OUT, out) if write_postscript: run_dot(DIR_DEPS_OUT, "ps") # write file dep graph out = [] for user, providers in sorted(file2inc.items()): for provider in sorted(providers): if user != provider: out.append(' "' + user + '" -> "' + provider + '"') write_dot_file('%s.dot' % FILE_DEPS_OUT, out) # os.system('dot -Tps %s.dot > %s.ps' % (MODULE_DEPS_OUT, MODULE_DEPS_OUT)) # write raw dep info #out = [] #for f, file2inc result = 
'\n'.join(illegalModuleDeps) if result: print result else: print "no dependency analyzer errors found" return result _ANALYZE_FN_ = analyze if __name__ == '__main__': analyze(True)
lgpl-2.1
shickey/BearStatus
dateutil/tz.py
270
32741
""" Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net> This module offers extensions to the standard python 2.3+ datetime module. """ __author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" __license__ = "PSF License" import datetime import struct import time import sys import os relativedelta = None parser = None rrule = None __all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"] try: from dateutil.tzwin import tzwin, tzwinlocal except (ImportError, OSError): tzwin, tzwinlocal = None, None ZERO = datetime.timedelta(0) EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal() class tzutc(datetime.tzinfo): def utcoffset(self, dt): return ZERO def dst(self, dt): return ZERO def tzname(self, dt): return "UTC" def __eq__(self, other): return (isinstance(other, tzutc) or (isinstance(other, tzoffset) and other._offset == ZERO)) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s()" % self.__class__.__name__ __reduce__ = object.__reduce__ class tzoffset(datetime.tzinfo): def __init__(self, name, offset): self._name = name self._offset = datetime.timedelta(seconds=offset) def utcoffset(self, dt): return self._offset def dst(self, dt): return ZERO def tzname(self, dt): return self._name def __eq__(self, other): return (isinstance(other, tzoffset) and self._offset == other._offset) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__, `self._name`, self._offset.days*86400+self._offset.seconds) __reduce__ = object.__reduce__ class tzlocal(datetime.tzinfo): _std_offset = datetime.timedelta(seconds=-time.timezone) if time.daylight: _dst_offset = datetime.timedelta(seconds=-time.altzone) else: _dst_offset = _std_offset def utcoffset(self, dt): if self._isdst(dt): return self._dst_offset else: return self._std_offset def dst(self, dt): if self._isdst(dt): return self._dst_offset-self._std_offset else: 
return ZERO def tzname(self, dt): return time.tzname[self._isdst(dt)] def _isdst(self, dt): # We can't use mktime here. It is unstable when deciding if # the hour near to a change is DST or not. # # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, # dt.minute, dt.second, dt.weekday(), 0, -1)) # return time.localtime(timestamp).tm_isdst # # The code above yields the following result: # #>>> import tz, datetime #>>> t = tz.tzlocal() #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() #'BRDT' #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() #'BRST' #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() #'BRST' #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() #'BRDT' #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() #'BRDT' # # Here is a more stable implementation: # timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + dt.hour * 3600 + dt.minute * 60 + dt.second) return time.localtime(timestamp+time.timezone).tm_isdst def __eq__(self, other): if not isinstance(other, tzlocal): return False return (self._std_offset == other._std_offset and self._dst_offset == other._dst_offset) return True def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s()" % self.__class__.__name__ __reduce__ = object.__reduce__ class _ttinfo(object): __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"] def __init__(self): for attr in self.__slots__: setattr(self, attr, None) def __repr__(self): l = [] for attr in self.__slots__: value = getattr(self, attr) if value is not None: l.append("%s=%s" % (attr, `value`)) return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) def __eq__(self, other): if not isinstance(other, _ttinfo): return False return (self.offset == other.offset and self.delta == other.delta and self.isdst == other.isdst and self.abbr == other.abbr and self.isstd == other.isstd and self.isgmt == other.isgmt) def __ne__(self, other): return not self.__eq__(other) def __getstate__(self): state = {} for 
name in self.__slots__: state[name] = getattr(self, name, None) return state def __setstate__(self, state): for name in self.__slots__: if name in state: setattr(self, name, state[name]) class tzfile(datetime.tzinfo): # http://www.twinsun.com/tz/tz-link.htm # ftp://elsie.nci.nih.gov/pub/tz*.tar.gz def __init__(self, fileobj): if isinstance(fileobj, basestring): self._filename = fileobj fileobj = open(fileobj) elif hasattr(fileobj, "name"): self._filename = fileobj.name else: self._filename = `fileobj` # From tzfile(5): # # The time zone information files used by tzset(3) # begin with the magic characters "TZif" to identify # them as time zone information files, followed by # sixteen bytes reserved for future use, followed by # six four-byte values of type long, written in a # ``standard'' byte order (the high-order byte # of the value is written first). if fileobj.read(4) != "TZif": raise ValueError, "magic not found" fileobj.read(16) ( # The number of UTC/local indicators stored in the file. ttisgmtcnt, # The number of standard/wall indicators stored in the file. ttisstdcnt, # The number of leap seconds for which data is # stored in the file. leapcnt, # The number of "transition times" for which data # is stored in the file. timecnt, # The number of "local time types" for which data # is stored in the file (must not be zero). typecnt, # The number of characters of "time zone # abbreviation strings" stored in the file. charcnt, ) = struct.unpack(">6l", fileobj.read(24)) # The above header is followed by tzh_timecnt four-byte # values of type long, sorted in ascending order. # These values are written in ``standard'' byte order. # Each is used as a transition time (as returned by # time(2)) at which the rules for computing local time # change. 
if timecnt: self._trans_list = struct.unpack(">%dl" % timecnt, fileobj.read(timecnt*4)) else: self._trans_list = [] # Next come tzh_timecnt one-byte values of type unsigned # char; each one tells which of the different types of # ``local time'' types described in the file is associated # with the same-indexed transition time. These values # serve as indices into an array of ttinfo structures that # appears next in the file. if timecnt: self._trans_idx = struct.unpack(">%dB" % timecnt, fileobj.read(timecnt)) else: self._trans_idx = [] # Each ttinfo structure is written as a four-byte value # for tt_gmtoff of type long, in a standard byte # order, followed by a one-byte value for tt_isdst # and a one-byte value for tt_abbrind. In each # structure, tt_gmtoff gives the number of # seconds to be added to UTC, tt_isdst tells whether # tm_isdst should be set by localtime(3), and # tt_abbrind serves as an index into the array of # time zone abbreviation characters that follow the # ttinfo structure(s) in the file. ttinfo = [] for i in range(typecnt): ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) abbr = fileobj.read(charcnt) # Then there are tzh_leapcnt pairs of four-byte # values, written in standard byte order; the # first value of each pair gives the time (as # returned by time(2)) at which a leap second # occurs; the second gives the total number of # leap seconds to be applied after the given time. # The pairs of values are sorted in ascending order # by time. # Not used, for now if leapcnt: leap = struct.unpack(">%dl" % (leapcnt*2), fileobj.read(leapcnt*8)) # Then there are tzh_ttisstdcnt standard/wall # indicators, each stored as a one-byte value; # they tell whether the transition times associated # with local time types were specified as standard # time or wall clock time, and are used when # a time zone file is used in handling POSIX-style # time zone environment variables. 
if ttisstdcnt: isstd = struct.unpack(">%db" % ttisstdcnt, fileobj.read(ttisstdcnt)) # Finally, there are tzh_ttisgmtcnt UTC/local # indicators, each stored as a one-byte value; # they tell whether the transition times associated # with local time types were specified as UTC or # local time, and are used when a time zone file # is used in handling POSIX-style time zone envi- # ronment variables. if ttisgmtcnt: isgmt = struct.unpack(">%db" % ttisgmtcnt, fileobj.read(ttisgmtcnt)) # ** Everything has been read ** # Build ttinfo list self._ttinfo_list = [] for i in range(typecnt): gmtoff, isdst, abbrind = ttinfo[i] # Round to full-minutes if that's not the case. Python's # datetime doesn't accept sub-minute timezones. Check # http://python.org/sf/1447945 for some information. gmtoff = (gmtoff+30)//60*60 tti = _ttinfo() tti.offset = gmtoff tti.delta = datetime.timedelta(seconds=gmtoff) tti.isdst = isdst tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] tti.isstd = (ttisstdcnt > i and isstd[i] != 0) tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) self._ttinfo_list.append(tti) # Replace ttinfo indexes for ttinfo objects. trans_idx = [] for idx in self._trans_idx: trans_idx.append(self._ttinfo_list[idx]) self._trans_idx = tuple(trans_idx) # Set standard, dst, and before ttinfos. before will be # used when a given time is before any transitions, # and will be set to the first non-dst ttinfo, or to # the first dst, if all of them are dst. 
self._ttinfo_std = None self._ttinfo_dst = None self._ttinfo_before = None if self._ttinfo_list: if not self._trans_list: self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0] else: for i in range(timecnt-1,-1,-1): tti = self._trans_idx[i] if not self._ttinfo_std and not tti.isdst: self._ttinfo_std = tti elif not self._ttinfo_dst and tti.isdst: self._ttinfo_dst = tti if self._ttinfo_std and self._ttinfo_dst: break else: if self._ttinfo_dst and not self._ttinfo_std: self._ttinfo_std = self._ttinfo_dst for tti in self._ttinfo_list: if not tti.isdst: self._ttinfo_before = tti break else: self._ttinfo_before = self._ttinfo_list[0] # Now fix transition times to become relative to wall time. # # I'm not sure about this. In my tests, the tz source file # is setup to wall time, and in the binary file isstd and # isgmt are off, so it should be in wall time. OTOH, it's # always in gmt time. Let me know if you have comments # about this. laststdoffset = 0 self._trans_list = list(self._trans_list) for i in range(len(self._trans_list)): tti = self._trans_idx[i] if not tti.isdst: # This is std time. self._trans_list[i] += tti.offset laststdoffset = tti.offset else: # This is dst time. Convert to std. 
self._trans_list[i] += laststdoffset self._trans_list = tuple(self._trans_list) def _find_ttinfo(self, dt, laststd=0): timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + dt.hour * 3600 + dt.minute * 60 + dt.second) idx = 0 for trans in self._trans_list: if timestamp < trans: break idx += 1 else: return self._ttinfo_std if idx == 0: return self._ttinfo_before if laststd: while idx > 0: tti = self._trans_idx[idx-1] if not tti.isdst: return tti idx -= 1 else: return self._ttinfo_std else: return self._trans_idx[idx-1] def utcoffset(self, dt): if not self._ttinfo_std: return ZERO return self._find_ttinfo(dt).delta def dst(self, dt): if not self._ttinfo_dst: return ZERO tti = self._find_ttinfo(dt) if not tti.isdst: return ZERO # The documentation says that utcoffset()-dst() must # be constant for every dt. return tti.delta-self._find_ttinfo(dt, laststd=1).delta # An alternative for that would be: # # return self._ttinfo_dst.offset-self._ttinfo_std.offset # # However, this class stores historical changes in the # dst offset, so I belive that this wouldn't be the right # way to implement this. 
def tzname(self, dt): if not self._ttinfo_std: return None return self._find_ttinfo(dt).abbr def __eq__(self, other): if not isinstance(other, tzfile): return False return (self._trans_list == other._trans_list and self._trans_idx == other._trans_idx and self._ttinfo_list == other._ttinfo_list) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s(%s)" % (self.__class__.__name__, `self._filename`) def __reduce__(self): if not os.path.isfile(self._filename): raise ValueError, "Unpickable %s class" % self.__class__.__name__ return (self.__class__, (self._filename,)) class tzrange(datetime.tzinfo): def __init__(self, stdabbr, stdoffset=None, dstabbr=None, dstoffset=None, start=None, end=None): global relativedelta if not relativedelta: from dateutil import relativedelta self._std_abbr = stdabbr self._dst_abbr = dstabbr if stdoffset is not None: self._std_offset = datetime.timedelta(seconds=stdoffset) else: self._std_offset = ZERO if dstoffset is not None: self._dst_offset = datetime.timedelta(seconds=dstoffset) elif dstabbr and stdoffset is not None: self._dst_offset = self._std_offset+datetime.timedelta(hours=+1) else: self._dst_offset = ZERO if dstabbr and start is None: self._start_delta = relativedelta.relativedelta( hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) else: self._start_delta = start if dstabbr and end is None: self._end_delta = relativedelta.relativedelta( hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) else: self._end_delta = end def utcoffset(self, dt): if self._isdst(dt): return self._dst_offset else: return self._std_offset def dst(self, dt): if self._isdst(dt): return self._dst_offset-self._std_offset else: return ZERO def tzname(self, dt): if self._isdst(dt): return self._dst_abbr else: return self._std_abbr def _isdst(self, dt): if not self._start_delta: return False year = datetime.datetime(dt.year,1,1) start = year+self._start_delta end = year+self._end_delta dt = dt.replace(tzinfo=None) if 
start < end: return dt >= start and dt < end else: return dt >= start or dt < end def __eq__(self, other): if not isinstance(other, tzrange): return False return (self._std_abbr == other._std_abbr and self._dst_abbr == other._dst_abbr and self._std_offset == other._std_offset and self._dst_offset == other._dst_offset and self._start_delta == other._start_delta and self._end_delta == other._end_delta) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s(...)" % self.__class__.__name__ __reduce__ = object.__reduce__ class tzstr(tzrange): def __init__(self, s): global parser if not parser: from dateutil import parser self._s = s res = parser._parsetz(s) if res is None: raise ValueError, "unknown string format" # Here we break the compatibility with the TZ variable handling. # GMT-3 actually *means* the timezone -3. if res.stdabbr in ("GMT", "UTC"): res.stdoffset *= -1 # We must initialize it first, since _delta() needs # _std_offset and _dst_offset set. Use False in start/end # to avoid building it two times. tzrange.__init__(self, res.stdabbr, res.stdoffset, res.dstabbr, res.dstoffset, start=False, end=False) if not res.dstabbr: self._start_delta = None self._end_delta = None else: self._start_delta = self._delta(res.start) if self._start_delta: self._end_delta = self._delta(res.end, isend=1) def _delta(self, x, isend=0): kwargs = {} if x.month is not None: kwargs["month"] = x.month if x.weekday is not None: kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) if x.week > 0: kwargs["day"] = 1 else: kwargs["day"] = 31 elif x.day: kwargs["day"] = x.day elif x.yday is not None: kwargs["yearday"] = x.yday elif x.jyday is not None: kwargs["nlyearday"] = x.jyday if not kwargs: # Default is to start on first sunday of april, and end # on last sunday of october. 
if not isend: kwargs["month"] = 4 kwargs["day"] = 1 kwargs["weekday"] = relativedelta.SU(+1) else: kwargs["month"] = 10 kwargs["day"] = 31 kwargs["weekday"] = relativedelta.SU(-1) if x.time is not None: kwargs["seconds"] = x.time else: # Default is 2AM. kwargs["seconds"] = 7200 if isend: # Convert to standard time, to follow the documented way # of working with the extra hour. See the documentation # of the tzinfo class. delta = self._dst_offset-self._std_offset kwargs["seconds"] -= delta.seconds+delta.days*86400 return relativedelta.relativedelta(**kwargs) def __repr__(self): return "%s(%s)" % (self.__class__.__name__, `self._s`) class _tzicalvtzcomp: def __init__(self, tzoffsetfrom, tzoffsetto, isdst, tzname=None, rrule=None): self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom self.isdst = isdst self.tzname = tzname self.rrule = rrule class _tzicalvtz(datetime.tzinfo): def __init__(self, tzid, comps=[]): self._tzid = tzid self._comps = comps self._cachedate = [] self._cachecomp = [] def _find_comp(self, dt): if len(self._comps) == 1: return self._comps[0] dt = dt.replace(tzinfo=None) try: return self._cachecomp[self._cachedate.index(dt)] except ValueError: pass lastcomp = None lastcompdt = None for comp in self._comps: if not comp.isdst: # Handle the extra hour in DST -> STD compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True) else: compdt = comp.rrule.before(dt, inc=True) if compdt and (not lastcompdt or lastcompdt < compdt): lastcompdt = compdt lastcomp = comp if not lastcomp: # RFC says nothing about what to do when a given # time is before the first onset date. We'll look for the # first standard component, or the first component, if # none is found. 
for comp in self._comps: if not comp.isdst: lastcomp = comp break else: lastcomp = comp[0] self._cachedate.insert(0, dt) self._cachecomp.insert(0, lastcomp) if len(self._cachedate) > 10: self._cachedate.pop() self._cachecomp.pop() return lastcomp def utcoffset(self, dt): return self._find_comp(dt).tzoffsetto def dst(self, dt): comp = self._find_comp(dt) if comp.isdst: return comp.tzoffsetdiff else: return ZERO def tzname(self, dt): return self._find_comp(dt).tzname def __repr__(self): return "<tzicalvtz %s>" % `self._tzid` __reduce__ = object.__reduce__ class tzical: def __init__(self, fileobj): global rrule if not rrule: from dateutil import rrule if isinstance(fileobj, basestring): self._s = fileobj fileobj = open(fileobj) elif hasattr(fileobj, "name"): self._s = fileobj.name else: self._s = `fileobj` self._vtz = {} self._parse_rfc(fileobj.read()) def keys(self): return self._vtz.keys() def get(self, tzid=None): if tzid is None: keys = self._vtz.keys() if len(keys) == 0: raise ValueError, "no timezones defined" elif len(keys) > 1: raise ValueError, "more than one timezone available" tzid = keys[0] return self._vtz.get(tzid) def _parse_offset(self, s): s = s.strip() if not s: raise ValueError, "empty offset" if s[0] in ('+', '-'): signal = (-1,+1)[s[0]=='+'] s = s[1:] else: signal = +1 if len(s) == 4: return (int(s[:2])*3600+int(s[2:])*60)*signal elif len(s) == 6: return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal else: raise ValueError, "invalid offset: "+s def _parse_rfc(self, s): lines = s.splitlines() if not lines: raise ValueError, "empty string" # Unfold i = 0 while i < len(lines): line = lines[i].rstrip() if not line: del lines[i] elif i > 0 and line[0] == " ": lines[i-1] += line[1:] del lines[i] else: i += 1 tzid = None comps = [] invtz = False comptype = None for line in lines: if not line: continue name, value = line.split(':', 1) parms = name.split(';') if not parms: raise ValueError, "empty property name" name = parms[0].upper() parms = 
parms[1:] if invtz: if name == "BEGIN": if value in ("STANDARD", "DAYLIGHT"): # Process component pass else: raise ValueError, "unknown component: "+value comptype = value founddtstart = False tzoffsetfrom = None tzoffsetto = None rrulelines = [] tzname = None elif name == "END": if value == "VTIMEZONE": if comptype: raise ValueError, \ "component not closed: "+comptype if not tzid: raise ValueError, \ "mandatory TZID not found" if not comps: raise ValueError, \ "at least one component is needed" # Process vtimezone self._vtz[tzid] = _tzicalvtz(tzid, comps) invtz = False elif value == comptype: if not founddtstart: raise ValueError, \ "mandatory DTSTART not found" if tzoffsetfrom is None: raise ValueError, \ "mandatory TZOFFSETFROM not found" if tzoffsetto is None: raise ValueError, \ "mandatory TZOFFSETFROM not found" # Process component rr = None if rrulelines: rr = rrule.rrulestr("\n".join(rrulelines), compatible=True, ignoretz=True, cache=True) comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, (comptype == "DAYLIGHT"), tzname, rr) comps.append(comp) comptype = None else: raise ValueError, \ "invalid component end: "+value elif comptype: if name == "DTSTART": rrulelines.append(line) founddtstart = True elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): rrulelines.append(line) elif name == "TZOFFSETFROM": if parms: raise ValueError, \ "unsupported %s parm: %s "%(name, parms[0]) tzoffsetfrom = self._parse_offset(value) elif name == "TZOFFSETTO": if parms: raise ValueError, \ "unsupported TZOFFSETTO parm: "+parms[0] tzoffsetto = self._parse_offset(value) elif name == "TZNAME": if parms: raise ValueError, \ "unsupported TZNAME parm: "+parms[0] tzname = value elif name == "COMMENT": pass else: raise ValueError, "unsupported property: "+name else: if name == "TZID": if parms: raise ValueError, \ "unsupported TZID parm: "+parms[0] tzid = value elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): pass else: raise ValueError, "unsupported property: "+name elif name == 
"BEGIN" and value == "VTIMEZONE": tzid = None comps = [] invtz = True def __repr__(self): return "%s(%s)" % (self.__class__.__name__, `self._s`) if sys.platform != "win32": TZFILES = ["/etc/localtime", "localtime"] TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"] else: TZFILES = [] TZPATHS = [] def gettz(name=None): tz = None if not name: try: name = os.environ["TZ"] except KeyError: pass if name is None or name == ":": for filepath in TZFILES: if not os.path.isabs(filepath): filename = filepath for path in TZPATHS: filepath = os.path.join(path, filename) if os.path.isfile(filepath): break else: continue if os.path.isfile(filepath): try: tz = tzfile(filepath) break except (IOError, OSError, ValueError): pass else: tz = tzlocal() else: if name.startswith(":"): name = name[:-1] if os.path.isabs(name): if os.path.isfile(name): tz = tzfile(name) else: tz = None else: for path in TZPATHS: filepath = os.path.join(path, name) if not os.path.isfile(filepath): filepath = filepath.replace(' ','_') if not os.path.isfile(filepath): continue try: tz = tzfile(filepath) break except (IOError, OSError, ValueError): pass else: tz = None if tzwin: try: tz = tzwin(name) except OSError: pass if not tz: from dateutil.zoneinfo import gettz tz = gettz(name) if not tz: for c in name: # name must have at least one offset to be a tzstr if c in "0123456789": try: tz = tzstr(name) except ValueError: pass break else: if name in ("GMT", "UTC"): tz = tzutc() elif name in time.tzname: tz = tzlocal() return tz # vim:ts=4:sw=4:et
mit
zhoulingjun/django
tests/custom_pk/models.py
282
1272
# -*- coding: utf-8 -*- """ Using a custom primary key By default, Django adds an ``"id"`` field to each model. But you can override this behavior by explicitly adding ``primary_key=True`` to a field. """ from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible from .fields import MyAutoField @python_2_unicode_compatible class Employee(models.Model): employee_code = models.IntegerField(primary_key=True, db_column='code') first_name = models.CharField(max_length=20) last_name = models.CharField(max_length=20) class Meta: ordering = ('last_name', 'first_name') def __str__(self): return "%s %s" % (self.first_name, self.last_name) @python_2_unicode_compatible class Business(models.Model): name = models.CharField(max_length=20, primary_key=True) employees = models.ManyToManyField(Employee) class Meta: verbose_name_plural = 'businesses' def __str__(self): return self.name @python_2_unicode_compatible class Bar(models.Model): id = MyAutoField(primary_key=True, db_index=True) def __str__(self): return repr(self.pk) class Foo(models.Model): bar = models.ForeignKey(Bar, models.CASCADE)
bsd-3-clause
h4ck3rm1k3/ansible
docsite/conf.py
119
6348
# -*- coding: utf-8 -*- # # documentation build configuration file, created by # sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009. # # This file is execfile()d with the current directory set to its # containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed # automatically). # # All configuration values have a default value; values that are commented out # serve to show the default value. import sys import os # pip install sphinx_rtd_theme #import sphinx_rtd_theme #html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.append(os.path.abspath('some/directory')) # sys.path.insert(0, os.path.join('ansible', 'lib')) sys.path.append(os.path.abspath('_themes')) VERSION='0.01' AUTHOR='Ansible, Inc' # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. # They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Later on, add 'sphinx.ext.viewcode' to the list if you want to have # colorized code generated too for references. # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'Ansible Documentation' copyright = "2013 Ansible, Inc" # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. version = VERSION # The full version, including alpha/beta/rc tags. 
release = VERSION # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directories, that shouldn't be # searched for source files. #exclude_dirs = [] # A list of glob-style patterns that should be excluded when looking # for source files. exclude_patterns = ['modules'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- html_theme_path = ['_themes'] html_theme = 'srtd' html_short_title = 'Ansible Documentation' # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. #html_style = 'solar.css' # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = 'Ansible Documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (within the static path) to place at the top of # the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
#html_favicon = 'favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['.static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/<name>. html_copy_source = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'Poseidodoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class # [howto/manual]). 
latex_documents = [ ('index', 'ansible.tex', 'Ansible 1.2 Documentation', AUTHOR, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True autoclass_content = 'both'
gpl-3.0
Changaco/oh-mainline
vendor/packages/Django/django/contrib/localflavor/pe/forms.py
109
2399
# -*- coding: utf-8 -*- """ PE-specific Form helpers. """ from __future__ import absolute_import, unicode_literals from django.contrib.localflavor.pe.pe_region import REGION_CHOICES from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import RegexField, CharField, Select from django.utils.translation import ugettext_lazy as _ class PERegionSelect(Select): """ A Select widget that uses a list of Peruvian Regions as its choices. """ def __init__(self, attrs=None): super(PERegionSelect, self).__init__(attrs, choices=REGION_CHOICES) class PEDNIField(CharField): """ A field that validates `Documento Nacional de IdentidadŽ (DNI) numbers. """ default_error_messages = { 'invalid': _("This field requires only numbers."), 'max_digits': _("This field requires 8 digits."), } def __init__(self, max_length=8, min_length=8, *args, **kwargs): super(PEDNIField, self).__init__(max_length, min_length, *args, **kwargs) def clean(self, value): """ Value must be a string in the XXXXXXXX formats. """ value = super(PEDNIField, self).clean(value) if value in EMPTY_VALUES: return '' if not value.isdigit(): raise ValidationError(self.error_messages['invalid']) if len(value) != 8: raise ValidationError(self.error_messages['max_digits']) return value class PERUCField(RegexField): """ This field validates a RUC (Registro Unico de Contribuyentes). A RUC is of the form XXXXXXXXXXX. """ default_error_messages = { 'invalid': _("This field requires only numbers."), 'max_digits': _("This field requires 11 digits."), } def __init__(self, max_length=11, min_length=11, *args, **kwargs): super(PERUCField, self).__init__(max_length, min_length, *args, **kwargs) def clean(self, value): """ Value must be an 11-digit number. 
""" value = super(PERUCField, self).clean(value) if value in EMPTY_VALUES: return '' if not value.isdigit(): raise ValidationError(self.error_messages['invalid']) if len(value) != 11: raise ValidationError(self.error_messages['max_digits']) return value
agpl-3.0
cosmiclattes/TPBviz
torrent/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/coordseq.py
624
3112
from ctypes import c_double, c_int, c_uint, POINTER

from django.contrib.gis.geos.libgeos import GEOM_PTR, CS_PTR
from django.contrib.gis.geos.prototypes.errcheck import last_arg_byref, GEOSException
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc

## Error-checking routines specific to coordinate sequences. ##

def check_cs_ptr(result, func, cargs):
    "Error checking on routines that return Geometries."
    if not result:
        raise GEOSException('Error encountered checking Coordinate Sequence returned from GEOS C function "%s".' % func.__name__)
    return result

def check_cs_op(result, func, cargs):
    "Checks the status code of a coordinate sequence operation."
    if result == 0:
        raise GEOSException('Could not set value on coordinate sequence')
    return result

def check_cs_get(result, func, cargs):
    "Checking the coordinate sequence retrieval."
    check_cs_op(result, func, cargs)
    # The requested value comes back through the final by-reference argument.
    return last_arg_byref(cargs)

## Coordinate sequence prototype generation functions. ##

def cs_int(func):
    "For coordinate sequence routines that return an integer."
    func.argtypes = [CS_PTR, POINTER(c_uint)]
    func.restype = c_int
    func.errcheck = check_cs_get
    return func

def cs_operation(func, ordinate=False, get=False):
    "For coordinate sequence operations."
    # Getters receive the double out-parameter by reference and use the
    # retrieval error check; setters take the double by value.
    dbl_type = POINTER(c_double) if get else c_double
    func.errcheck = check_cs_get if get else check_cs_op
    # Get/Set ordinate routines carry an extra uint (the ordinate index).
    if ordinate:
        func.argtypes = [CS_PTR, c_uint, c_uint, dbl_type]
    else:
        func.argtypes = [CS_PTR, c_uint, dbl_type]
    func.restype = c_int
    return func

def cs_output(func, argtypes):
    "For routines that return a coordinate sequence."
    func.argtypes = argtypes
    func.restype = CS_PTR
    func.errcheck = check_cs_ptr
    return func

## Coordinate Sequence ctypes prototypes ##

# Coordinate Sequence constructors & cloning.
cs_clone = cs_output(GEOSFunc('GEOSCoordSeq_clone'), [CS_PTR])
create_cs = cs_output(GEOSFunc('GEOSCoordSeq_create'), [c_uint, c_uint])
get_cs = cs_output(GEOSFunc('GEOSGeom_getCoordSeq'), [GEOM_PTR])

# Getting, setting ordinate
cs_getordinate = cs_operation(GEOSFunc('GEOSCoordSeq_getOrdinate'), ordinate=True, get=True)
cs_setordinate = cs_operation(GEOSFunc('GEOSCoordSeq_setOrdinate'), ordinate=True)

# For getting, x, y, z
cs_getx = cs_operation(GEOSFunc('GEOSCoordSeq_getX'), get=True)
cs_gety = cs_operation(GEOSFunc('GEOSCoordSeq_getY'), get=True)
cs_getz = cs_operation(GEOSFunc('GEOSCoordSeq_getZ'), get=True)

# For setting, x, y, z
cs_setx = cs_operation(GEOSFunc('GEOSCoordSeq_setX'))
cs_sety = cs_operation(GEOSFunc('GEOSCoordSeq_setY'))
cs_setz = cs_operation(GEOSFunc('GEOSCoordSeq_setZ'))

# These routines return size & dimensions.
cs_getsize = cs_int(GEOSFunc('GEOSCoordSeq_getSize'))
cs_getdims = cs_int(GEOSFunc('GEOSCoordSeq_getDimensions'))
gpl-3.0
genevolv/dbrev
py/freevolv/models/dbrev/table.py
1
8372
'''
dbrev.table can be thought of as a bean or a template.
It has only attributes with getters and setters.
'''
import logging
LOG = logging.getLogger(__name__)
# LOG.setLevel(logging.INFO)
# Long lines expected.
# pylint: disable=C0301
# Cyclic imports protected by functions
# pylint: disable=R0401


class Table(object):
    '''Table class generated from TABLES table.'''

    # Plain-data attribute names in declaration order; drives __str__.
    _STR_ATTRS = ('database_name', 'schema_name', 'name', 'py_singular',
                  'cap_words_singular', 'py_plural', 'cap_words_plural',
                  'supertype_schema', 'supertype_name', 'primary_key_name')

    def __init__(self, database_name=None, schema_name=None, name=None,
                 py_singular=None, cap_words_singular=None, py_plural=None,
                 cap_words_plural=None, supertype_schema=None,
                 supertype_name=None, primary_key_name=None):
        self.database_name = database_name
        self.schema_name = schema_name
        self.name = name
        self.py_singular = py_singular
        self.cap_words_singular = cap_words_singular
        self.py_plural = py_plural
        self.cap_words_plural = cap_words_plural
        self.supertype_schema = supertype_schema
        self.supertype_name = supertype_name
        self.primary_key_name = primary_key_name
        # Related objects, resolved lazily on first property access and
        # cached afterwards.
        self._database = None
        self._primary_key = None
        self._schema = None
        self._supertype = None
        self._columns = None
        self._foreign_keys = None
        self._tables = None
        self._unique_key_columns = None
        self._unique_keys = None

    def __str__(self):
        pieces = [attr + ':' + str(getattr(self, attr))
                  for attr in self._STR_ATTRS
                  if getattr(self, attr) is not None]
        return 'Table(' + ','.join(pieces) + ')'

    def get_database(self):
        ''' Getter method for database.'''
        if self._database is None and self.database_name is not None:
            # Imported here to avoid a cyclic module import.
            from freevolv.models.dbrev import databases_table
            self._database = databases_table.DatabasesTable.get_instance() \
                .get_one(name=self.database_name)
        return self._database

    def set_database(self, database):
        ''' Setter method for database.'''
        self._database = database

    database = property(get_database, set_database)

    def get_primary_key(self):
        ''' Getter method for primary_key.'''
        if self._primary_key is None and self.database_name is not None \
                and self.schema_name is not None and self.name is not None \
                and self.primary_key_name is not None:
            from freevolv.models.dbrev import unique_keys_table
            self._primary_key = unique_keys_table.UniqueKeysTable.get_instance() \
                .get_one(database_name=self.database_name,
                         schema_name=self.schema_name,
                         table_name=self.name,
                         name=self.primary_key_name)
        return self._primary_key

    def set_primary_key(self, primary_key):
        ''' Setter method for primary_key.'''
        self._primary_key = primary_key

    primary_key = property(get_primary_key, set_primary_key)

    def get_schema(self):
        ''' Getter method for schema.'''
        if self._schema is None and self.database_name is not None \
                and self.schema_name is not None:
            from freevolv.models.dbrev import schemas_table
            self._schema = schemas_table.SchemasTable.get_instance() \
                .get_one(database_name=self.database_name,
                         name=self.schema_name)
        return self._schema

    def set_schema(self, schema):
        ''' Setter method for schema.'''
        self._schema = schema

    schema = property(get_schema, set_schema)

    def get_supertype(self):
        ''' Getter method for supertype.'''
        if self._supertype is None and self.database_name is not None \
                and self.supertype_schema is not None \
                and self.supertype_name is not None:
            from freevolv.models.dbrev import tables_table
            self._supertype = tables_table.TablesTable.get_instance() \
                .get_one(database_name=self.database_name,
                         schema_name=self.supertype_schema,
                         name=self.supertype_name)
        return self._supertype

    def set_supertype(self, supertype):
        ''' Setter method for supertype.'''
        self._supertype = supertype

    supertype = property(get_supertype, set_supertype)

    def get_columns(self):
        ''' Getter method for columns.'''
        if self._columns is None and self.database_name is not None \
                and self.schema_name is not None and self.name is not None:
            from freevolv.models.dbrev import columns_table
            self._columns = columns_table.ColumnsTable.get_instance() \
                .get(database_name=self.database_name,
                     schema_name=self.schema_name,
                     table_name=self.name)
        return self._columns

    def set_columns(self, columns):
        ''' Setter method for columns.'''
        self._columns = columns

    columns = property(get_columns, set_columns)

    def get_foreign_keys(self):
        ''' Getter method for foreign_keys.'''
        if self._foreign_keys is None and self.database_name is not None \
                and self.schema_name is not None and self.name is not None:
            from freevolv.models.dbrev import foreign_keys_table
            self._foreign_keys = foreign_keys_table.ForeignKeysTable.get_instance() \
                .get(database_name=self.database_name,
                     schema_name=self.schema_name,
                     table_name=self.name)
        return self._foreign_keys

    def set_foreign_keys(self, foreign_keys):
        ''' Setter method for foreign_keys.'''
        self._foreign_keys = foreign_keys

    foreign_keys = property(get_foreign_keys, set_foreign_keys)

    def get_tables(self):
        ''' Getter method for tables (subtypes of this table).'''
        if self._tables is None and self.database_name is not None \
                and self.schema_name is not None and self.name is not None:
            from freevolv.models.dbrev import tables_table
            self._tables = tables_table.TablesTable.get_instance() \
                .get(database_name=self.database_name,
                     supertype_schema=self.schema_name,
                     supertype_name=self.name)
        return self._tables

    def set_tables(self, tables):
        ''' Setter method for tables.'''
        self._tables = tables

    tables = property(get_tables, set_tables)

    def get_unique_key_columns(self):
        ''' Getter method for unique_key_columns.'''
        if self._unique_key_columns is None and self.database_name is not None \
                and self.schema_name is not None and self.name is not None:
            from freevolv.models.dbrev import unique_key_columns_table
            self._unique_key_columns = unique_key_columns_table.UniqueKeyColumnsTable.get_instance() \
                .get(database_name=self.database_name,
                     schema_name=self.schema_name,
                     table_name=self.name)
        return self._unique_key_columns

    def set_unique_key_columns(self, unique_key_columns):
        ''' Setter method for unique_key_columns.'''
        self._unique_key_columns = unique_key_columns

    unique_key_columns = property(get_unique_key_columns, set_unique_key_columns)

    def get_unique_keys(self):
        ''' Getter method for unique_keys.'''
        if self._unique_keys is None and self.database_name is not None \
                and self.schema_name is not None and self.name is not None:
            from freevolv.models.dbrev import unique_keys_table
            self._unique_keys = unique_keys_table.UniqueKeysTable.get_instance() \
                .get(database_name=self.database_name,
                     schema_name=self.schema_name,
                     table_name=self.name)
        return self._unique_keys

    def set_unique_keys(self, unique_keys):
        ''' Setter method for unique_keys.'''
        self._unique_keys = unique_keys

    unique_keys = property(get_unique_keys, set_unique_keys)
bsd-2-clause
dcamposliz/youtube-dl
youtube_dl/extractor/myvi.py
132
2245
# coding: utf-8 from __future__ import unicode_literals import re from .vimple import SprutoBaseIE class MyviIE(SprutoBaseIE): _VALID_URL = r'''(?x) https?:// myvi\.(?:ru/player|tv)/ (?: (?: embed/html| flash| api/Video/Get )/| content/preloader\.swf\?.*\bid= ) (?P<id>[\da-zA-Z_-]+) ''' _TESTS = [{ 'url': 'http://myvi.ru/player/embed/html/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', 'md5': '571bbdfba9f9ed229dc6d34cc0f335bf', 'info_dict': { 'id': 'f16b2bbd-cde8-481c-a981-7cd48605df43', 'ext': 'mp4', 'title': 'хозяин жизни', 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 25, }, }, { 'url': 'http://myvi.ru/player/content/preloader.swf?id=oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wOYf1WFpPfc_bWTKGVf_Zafr0', 'only_matching': True, }, { 'url': 'http://myvi.ru/player/api/Video/Get/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', 'only_matching': True, }, { 'url': 'http://myvi.tv/embed/html/oTGTNWdyz4Zwy_u1nraolwZ1odenTd9WkTnRfIL9y8VOgHYqOHApE575x4_xxS9Vn0?ap=0', 'only_matching': True, }, { 'url': 'http://myvi.ru/player/flash/ocp2qZrHI-eZnHKQBK4cZV60hslH8LALnk0uBfKsB-Q4WnY26SeGoYPi8HWHxu0O30', 'only_matching': True, }] @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//myvi\.(?:ru/player|tv)/(?:embed/html|flash)/[^"]+)\1', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) spruto = self._download_json( 'http://myvi.ru/player/api/Video/Get/%s?sig' % video_id, video_id)['sprutoData'] return self._extract_spruto(spruto, video_id)
unlicense
openstack/ironic
ironic/drivers/modules/agent_power.py
1
8765
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
The agent power interface.
"""

import time

from oslo_config import cfg
from oslo_log import log
import tenacity

from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.conductor import utils as cond_utils
from ironic.drivers import base
from ironic.drivers.modules import agent_client

CONF = cfg.CONF

LOG = log.getLogger(__name__)

# Seconds to sleep after requesting a reboot before polling the agent,
# giving the node time to actually power down (see reboot()).
_POWER_WAIT = 30


class AgentPower(base.PowerInterface):
    """Power interface using the running agent for power actions.

    Can only soft-reboot fast-track nodes via the in-band agent; it cannot
    power nodes on or off, so power sync is unsupported.
    """

    def __init__(self):
        super(AgentPower, self).__init__()
        # This interface is only usable with fast track enabled, since it
        # relies on an agent that is already running.
        if not CONF.deploy.fast_track:
            raise exception.InvalidParameterValue(
                _('[deploy]fast_track must be True to enable the agent '
                  'power interface'))
        self._client = agent_client.AgentClient()

    def get_properties(self):
        """Return the properties of the interface.

        :returns: dictionary of <property name>:<property description> entries.
        """
        return {}

    def validate(self, task):
        """Validate the driver-specific Node deployment info.

        :param task: A TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue on malformed parameter(s)
        """
        # NOTE(dtantsur): the fast_track option is mutable, so we have to check
        # it again on validation.
        if not CONF.deploy.fast_track:
            raise exception.InvalidParameterValue(
                _('[deploy]fast_track must be True to enable the agent '
                  'power interface'))
        # TODO(dtantsur): support ACTIVE nodes
        if not cond_utils.agent_is_alive(task.node):
            raise exception.InvalidParameterValue(
                _('Agent seems offline for node %s, the agent power interface '
                  'cannot be used') % task.node.uuid)

    def supports_power_sync(self, task):
        """Check if power sync is supported for the given node.

        Not supported for the agent power since it is not possible to
        power on/off nodes.

        :param task: A TaskManager instance containing the node to act on
            with a **shared** lock.
        :returns: boolean, whether power sync is supported.
        """
        return False

    def get_supported_power_states(self, task):
        """Get a list of the supported power states.

        Only contains REBOOT.

        :param task: A TaskManager instance containing the node to act on.
        :returns: A list with the supported power states defined
                  in :mod:`ironic.common.states`.
        """
        return [states.REBOOT, states.SOFT_REBOOT]

    def get_power_state(self, task):
        """Return the power state of the task's node.

        Essentially, the only known state is POWER ON, everything else is
        an error (or more precisely ``None``).

        :param task: A TaskManager instance containing the node to act on.
        :returns: A power state. One of :mod:`ironic.common.states`.
        """
        # TODO(dtantsur): support ACTIVE nodes
        if cond_utils.agent_is_alive(task.node):
            return states.POWER_ON
        else:
            LOG.error('Node %s is not fast-track-able, cannot determine '
                      'its power state via the "agent" power interface',
                      task.node.uuid)
            return None

    def set_power_state(self, task, power_state, timeout=None):
        """Set the power state of the task's node.

        :param task: A TaskManager instance containing the node to act on.
        :param power_state: Power state from :mod:`ironic.common.states`.
            Only REBOOT and SOFT_REBOOT are supported and are synonymous.
        :param timeout: timeout (in seconds) positive integer (> 0) for any
          power state. ``None`` indicates to use default timeout.
        :raises: PowerStateFailure on non-supported power state.
        """
        if power_state in (states.REBOOT, states.SOFT_REBOOT):
            return self.reboot(task)
        else:
            LOG.error('Power state %(state)s is not implemented for node '
                      '%(node)s using the "agent" power interface',
                      {'node': task.node.uuid, 'state': power_state})
            raise exception.PowerStateFailure(pstate=power_state)

    def reboot(self, task, timeout=None):
        """Perform a reboot of the task's node.

        Only soft reboot is implemented.

        :param task: A TaskManager instance containing the node to act on.
        :param timeout: timeout (in seconds) positive integer (> 0) for any
          power state. ``None`` indicates to use default timeout.
        """
        node = task.node

        self._client.reboot(node)

        info = node.driver_internal_info
        # NOTE(dtantsur): wipe the agent token, otherwise the rebooted agent
        # won't be able to heartbeat. This is mostly a precaution since the
        # calling code in conductor is expected to handle it.
        if not info.get('agent_secret_token_pregenerated'):
            info.pop('agent_secret_token', None)
        # NOTE(dtantsur): the URL may change on reboot, wipe it as well (but
        # only after we call reboot).
        info.pop('agent_url', None)
        node.driver_internal_info = info
        node.save()

        LOG.debug('Requested reboot of node %(node)s via the agent, waiting '
                  '%(wait)d seconds for the node to power down',
                  {'node': task.node.uuid, 'wait': _POWER_WAIT})
        time.sleep(_POWER_WAIT)

        # Only wait for the agent to come back when a reboot was requested as
        # part of a deployment or cleaning step; otherwise return immediately.
        if (node.provision_state in (states.DEPLOYING, states.CLEANING)
                and (node.driver_internal_info.get('deployment_reboot')
                     or node.driver_internal_info.get('cleaning_reboot'))):
            # NOTE(dtantsur): we need to downgrade the lock otherwise
            # heartbeats won't be processed. It should not have side effects
            # for nodes in DEPLOYING/CLEANING.
            task.downgrade_lock()
            try:
                self._wait_for_reboot(task, timeout)
            finally:
                # The caller probably expects a lock, so re-acquire it
                task.upgrade_lock()

    def _wait_for_reboot(self, task, timeout):
        # Poll the agent until it reports no pending reboot command, i.e.
        # it has come back up after the reboot.
        wait = CONF.agent.post_deploy_get_power_state_retry_interval
        if not timeout:
            timeout = CONF.agent.post_deploy_get_power_state_retries * wait

        # Retries while the node is still rebooting (falsy result) or the
        # agent is unreachable; re-raises AgentConnectionFailed on timeout.
        @tenacity.retry(
            stop=tenacity.stop_after_delay(timeout),
            retry=(tenacity.retry_if_result(lambda result: not result)
                   | tenacity.retry_if_exception_type(
                       exception.AgentConnectionFailed)),
            wait=tenacity.wait_fixed(wait),
            reraise=True)
        def _wait_until_rebooted(task):
            try:
                status = self._client.get_commands_status(
                    task.node, retry_connection=False, expect_errors=True)
            except exception.AgentConnectionFailed:
                LOG.debug('Still waiting for the agent to come back on the '
                          'node %s', task.node.uuid)
                raise

            if any(cmd['command_name'] == agent_client.REBOOT_COMMAND
                   for cmd in status):
                LOG.debug('Still waiting for the agent to power off on the '
                          'node %s', task.node.uuid)
                return False

            return True

        try:
            _wait_until_rebooted(task)
        except exception.AgentConnectionFailed as exc:
            msg = _('Agent failed to come back on %(node)s with the "agent" '
                    'power interface: %(exc)s') % {
                        'node': task.node.uuid, 'exc': exc}
            LOG.error(msg)
            raise exception.PowerStateFailure(msg)
        except Exception as exc:
            LOG.error('Could not reboot node %(node)s with the "agent" power '
                      'interface: %(exc)s',
                      {'node': task.node.uuid, 'exc': exc})
            raise exception.PowerStateFailure(
                _('Unexpected error when rebooting through the agent: %s')
                % exc)
apache-2.0
rrrrrr8/vnpy
vnpy/api/ctp/py3/pyscript/generate_struct.py
40
1412
# encoding: UTF-8

__author__ = 'CHENXY'

from ctp_data_type import *


def main():
    """Parse ThostFtdcUserApiStruct.h and generate the ctp_struct.py module.

    The CTP header is GBK-encoded (Chinese comments); the generated Python
    file is written as UTF-8.  Reads the typedef mapping from the
    star-imported ``typedefDict`` of ctp_data_type.

    Fixes vs. the original: the old ``py_line.decode('gbk').encode('utf-8')``
    raises AttributeError on Python 3 (str has no decode); encoding is now
    handled by opening the files in text mode with explicit encodings, and
    both file handles are closed via ``with``.
    """
    name = ''  # name of the struct currently being parsed
    with open('ThostFtdcUserApiStruct.h', 'r', encoding='gbk') as fcpp, \
            open('ctp_struct.py', 'w', encoding='utf-8') as fpy:
        fpy.write('# encoding: UTF-8\n')
        fpy.write('\n')
        fpy.write('structDict = {}\n')
        fpy.write('\n')

        for line in fcpp:
            if '///' in line and '\t' not in line:
                # Comment line above a struct declaration
                py_line = '#' + line[3:]
            elif '\t///' in line:
                # Comment line above a struct member
                py_line = '#' + line[4:]
            elif 'struct ' in line:
                # Struct declaration: start a new dict named after the struct
                content = line.split(' ')
                name = content[1].replace('\n', '')
                py_line = '%s = {}\n' % name
            elif '\t' in line and '///' not in line:
                # Struct member line: "\t<typedef>\t<variable>;"
                content = line.split('\t')
                typedef = content[1]
                type_ = typedefDict[typedef]
                variable = content[2].replace(';\n', '')
                py_line = '%s["%s"] = "%s"\n' % (name, variable, type_)
            elif '}' in line:
                # End of struct: register it in the module-level dict
                py_line = "structDict['%s'] = %s\n\n" % (name, name)
            elif '{' in line:
                # Opening brace of a struct body: emit nothing
                py_line = ''
            else:
                py_line = '\n'

            fpy.write(py_line)


if __name__ == '__main__':
    main()
mit
mhbu50/erpnext
erpnext/hr/doctype/shift_assignment/shift_assignment.py
3
11412
# -*- coding: utf-8 -*- # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.model.document import Document from frappe.utils import cint, cstr, date_diff, flt, formatdate, getdate, now_datetime, nowdate from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee from erpnext.hr.doctype.holiday_list.holiday_list import is_holiday from datetime import timedelta, datetime class ShiftAssignment(Document): def validate(self): self.validate_overlapping_dates() if self.end_date and self.end_date <= self.start_date: frappe.throw(_("End Date must not be lesser than Start Date")) def validate_overlapping_dates(self): if not self.name: self.name = "New Shift Assignment" condition = """and ( end_date is null or %(start_date)s between start_date and end_date """ if self.end_date: condition += """ or %(end_date)s between start_date and end_date or start_date between %(start_date)s and %(end_date)s ) """ else: condition += """ ) """ assigned_shifts = frappe.db.sql(""" select name, shift_type, start_date ,end_date, docstatus, status from `tabShift Assignment` where employee=%(employee)s and docstatus = 1 and name != %(name)s and status = "Active" {0} """.format(condition), { "employee": self.employee, "shift_type": self.shift_type, "start_date": self.start_date, "end_date": self.end_date, "name": self.name }, as_dict = 1) if len(assigned_shifts): self.throw_overlap_error(assigned_shifts[0]) def throw_overlap_error(self, shift_details): shift_details = frappe._dict(shift_details) if shift_details.docstatus == 1 and shift_details.status == "Active": msg = _("Employee {0} already has Active Shift {1}: {2}").format(frappe.bold(self.employee), frappe.bold(self.shift_type), frappe.bold(shift_details.name)) if shift_details.start_date: msg += _(" from {0}").format(getdate(self.start_date).strftime("%d-%m-%Y")) title = 
"Ongoing Shift" if shift_details.end_date: msg += _(" to {0}").format(getdate(self.end_date).strftime("%d-%m-%Y")) title = "Active Shift" if msg: frappe.throw(msg, title=title) @frappe.whitelist() def get_events(start, end, filters=None): events = [] employee = frappe.db.get_value("Employee", {"user_id": frappe.session.user}, ["name", "company"], as_dict=True) if employee: employee, company = employee.name, employee.company else: employee='' company=frappe.db.get_value("Global Defaults", None, "default_company") from frappe.desk.reportview import get_filters_cond conditions = get_filters_cond("Shift Assignment", filters, []) add_assignments(events, start, end, conditions=conditions) return events def add_assignments(events, start, end, conditions=None): query = """select name, start_date, end_date, employee_name, employee, docstatus, shift_type from `tabShift Assignment` where start_date >= %(start_date)s or end_date <= %(end_date)s or (%(start_date)s between start_date and end_date and %(end_date)s between start_date and end_date) and docstatus = 1""" if conditions: query += conditions records = frappe.db.sql(query, {"start_date":start, "end_date":end}, as_dict=True) shift_timing_map = get_shift_type_timing([d.shift_type for d in records]) for d in records: daily_event_start = d.start_date daily_event_end = d.end_date if d.end_date else getdate() delta = timedelta(days=1) while daily_event_start <= daily_event_end: start_timing = frappe.utils.get_datetime(daily_event_start)+ shift_timing_map[d.shift_type]['start_time'] end_timing = frappe.utils.get_datetime(daily_event_start)+ shift_timing_map[d.shift_type]['end_time'] daily_event_start += delta e = { "name": d.name, "doctype": "Shift Assignment", "start_date": start_timing, "end_date": end_timing, "title": cstr(d.employee_name) + ": "+ \ cstr(d.shift_type), "docstatus": d.docstatus, "allDay": 0 } if e not in events: events.append(e) return events def get_shift_type_timing(shift_types): shift_timing_map = {} data 
= frappe.get_all("Shift Type", filters = {"name": ("IN", shift_types)}, fields = ['name', 'start_time', 'end_time']) for d in data: shift_timing_map[d.name] = d return shift_timing_map def get_employee_shift(employee, for_date=nowdate(), consider_default_shift=False, next_shift_direction=None): """Returns a Shift Type for the given employee on the given date. (excluding the holidays) :param employee: Employee for which shift is required. :param for_date: Date on which shift are required :param consider_default_shift: If set to true, default shift is taken when no shift assignment is found. :param next_shift_direction: One of: None, 'forward', 'reverse'. Direction to look for next shift if shift not found on given date. """ default_shift = frappe.db.get_value('Employee', employee, 'default_shift') shift_type_name = None shift_assignment_details = frappe.db.get_value('Shift Assignment', {'employee':employee, 'start_date':('<=', for_date), 'docstatus': '1', 'status': "Active"}, ['shift_type', 'end_date']) if shift_assignment_details: shift_type_name = shift_assignment_details[0] # if end_date present means that shift is over after end_date else it is a ongoing shift. 
if shift_assignment_details[1] and for_date >= shift_assignment_details[1] : shift_type_name = None if not shift_type_name and consider_default_shift: shift_type_name = default_shift if shift_type_name: holiday_list_name = frappe.db.get_value('Shift Type', shift_type_name, 'holiday_list') if not holiday_list_name: holiday_list_name = get_holiday_list_for_employee(employee, False) if holiday_list_name and is_holiday(holiday_list_name, for_date): shift_type_name = None if not shift_type_name and next_shift_direction: MAX_DAYS = 366 if consider_default_shift and default_shift: direction = -1 if next_shift_direction == 'reverse' else +1 for i in range(MAX_DAYS): date = for_date+timedelta(days=direction*(i+1)) shift_details = get_employee_shift(employee, date, consider_default_shift, None) if shift_details: shift_type_name = shift_details.shift_type.name for_date = date break else: direction = '<' if next_shift_direction == 'reverse' else '>' sort_order = 'desc' if next_shift_direction == 'reverse' else 'asc' dates = frappe.db.get_all('Shift Assignment', ['start_date', 'end_date'], {'employee':employee, 'start_date':(direction, for_date), 'docstatus': '1', "status": "Active"}, as_list=True, limit=MAX_DAYS, order_by="start_date "+sort_order) if dates: for date in dates: if date[1] and date[1] < for_date: continue shift_details = get_employee_shift(employee, date[0], consider_default_shift, None) if shift_details: shift_type_name = shift_details.shift_type.name for_date = date[0] break return get_shift_details(shift_type_name, for_date) def get_employee_shift_timings(employee, for_timestamp=now_datetime(), consider_default_shift=False): """Returns previous shift, current/upcoming shift, next_shift for the given timestamp and employee """ # write and verify a test case for midnight shift. 
prev_shift = curr_shift = next_shift = None curr_shift = get_employee_shift(employee, for_timestamp.date(), consider_default_shift, 'forward') if curr_shift: next_shift = get_employee_shift(employee, curr_shift.start_datetime.date()+timedelta(days=1), consider_default_shift, 'forward') prev_shift = get_employee_shift(employee, for_timestamp.date()+timedelta(days=-1), consider_default_shift, 'reverse') if curr_shift: if prev_shift: curr_shift.actual_start = prev_shift.end_datetime if curr_shift.actual_start < prev_shift.end_datetime else curr_shift.actual_start prev_shift.actual_end = curr_shift.actual_start if prev_shift.actual_end > curr_shift.actual_start else prev_shift.actual_end if next_shift: next_shift.actual_start = curr_shift.end_datetime if next_shift.actual_start < curr_shift.end_datetime else next_shift.actual_start curr_shift.actual_end = next_shift.actual_start if curr_shift.actual_end > next_shift.actual_start else curr_shift.actual_end return prev_shift, curr_shift, next_shift def get_shift_details(shift_type_name, for_date=nowdate()): """Returns Shift Details which contain some additional information as described below. 'shift_details' contains the following keys: 'shift_type' - Object of DocType Shift Type, 'start_datetime' - Date and Time of shift start on given date, 'end_datetime' - Date and Time of shift end on given date, 'actual_start' - datetime of shift start after adding 'begin_check_in_before_shift_start_time', 'actual_end' - datetime of shift end after adding 'allow_check_out_after_shift_end_time'(None is returned if this is zero) :param shift_type_name: shift type name for which shift_details is required. 
:param for_date: Date on which shift_details are required """ if not shift_type_name: return None shift_type = frappe.get_doc('Shift Type', shift_type_name) start_datetime = datetime.combine(for_date, datetime.min.time()) + shift_type.start_time for_date = for_date + timedelta(days=1) if shift_type.start_time > shift_type.end_time else for_date end_datetime = datetime.combine(for_date, datetime.min.time()) + shift_type.end_time actual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time) actual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time) return frappe._dict({ 'shift_type': shift_type, 'start_datetime': start_datetime, 'end_datetime': end_datetime, 'actual_start': actual_start, 'actual_end': actual_end }) def get_actual_start_end_datetime_of_shift(employee, for_datetime, consider_default_shift=False): """Takes a datetime and returns the 'actual' start datetime and end datetime of the shift in which the timestamp belongs. Here 'actual' means - taking in to account the "begin_check_in_before_shift_start_time" and "allow_check_out_after_shift_end_time". None is returned if the timestamp is outside any actual shift timings. Shift Details is also returned(current/upcoming i.e. 
if timestamp not in any actual shift then details of next shift returned) """ actual_shift_start = actual_shift_end = shift_details = None shift_timings_as_per_timestamp = get_employee_shift_timings(employee, for_datetime, consider_default_shift) timestamp_list = [] for shift in shift_timings_as_per_timestamp: if shift: timestamp_list.extend([shift.actual_start, shift.actual_end]) else: timestamp_list.extend([None, None]) timestamp_index = None for index, timestamp in enumerate(timestamp_list): if timestamp and for_datetime <= timestamp: timestamp_index = index break if timestamp_index and timestamp_index%2 == 1: shift_details = shift_timings_as_per_timestamp[int((timestamp_index-1)/2)] actual_shift_start = shift_details.actual_start actual_shift_end = shift_details.actual_end elif timestamp_index: shift_details = shift_timings_as_per_timestamp[int(timestamp_index/2)] return actual_shift_start, actual_shift_end, shift_details
gpl-3.0
guewen/OpenUpgrade
addons/delivery/delivery.py
22
12548
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields,osv from openerp.tools.translate import _ import openerp.addons.decimal_precision as dp class delivery_carrier(osv.osv): _name = "delivery.carrier" _description = "Carrier" def name_get(self, cr, uid, ids, context=None): if not len(ids): return [] if context is None: context = {} order_id = context.get('order_id',False) if not order_id: res = super(delivery_carrier, self).name_get(cr, uid, ids, context=context) else: order = self.pool.get('sale.order').browse(cr, uid, order_id, context=context) currency = order.pricelist_id.currency_id.name or '' res = [(r['id'], r['name']+' ('+(str(r['price']))+' '+currency+')') for r in self.read(cr, uid, ids, ['name', 'price'], context)] return res def get_price(self, cr, uid, ids, field_name, arg=None, context=None): res={} if context is None: context = {} sale_obj=self.pool.get('sale.order') grid_obj=self.pool.get('delivery.grid') for carrier in self.browse(cr, uid, ids, context=context): order_id=context.get('order_id',False) price=False if order_id: order = 
sale_obj.browse(cr, uid, order_id, context=context) carrier_grid=self.grid_get(cr,uid,[carrier.id],order.partner_shipping_id.id,context) if carrier_grid: price=grid_obj.get_price(cr, uid, carrier_grid, order, time.strftime('%Y-%m-%d'), context) else: price = 0.0 res[carrier.id]=price return res _columns = { 'name': fields.char('Delivery Method', size=64, required=True), 'partner_id': fields.many2one('res.partner', 'Transport Company', required=True, help="The partner that is doing the delivery service."), 'product_id': fields.many2one('product.product', 'Delivery Product', required=True), 'grids_id': fields.one2many('delivery.grid', 'carrier_id', 'Delivery Grids'), 'price' : fields.function(get_price, string='Price'), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery carrier without removing it."), 'normal_price': fields.float('Normal Price', help="Keep empty if the pricing depends on the advanced pricing per destination"), 'free_if_more_than': fields.boolean('Free If Order Total Amount Is More Than', help="If the order is more expensive than a certain amount, the customer can benefit from a free shipping"), 'amount': fields.float('Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency"), 'use_detailed_pricelist': fields.boolean('Advanced Pricing per Destination', help="Check this box if you want to manage delivery prices that depends on the destination, the weight, the total of the order, etc."), 'pricelist_ids': fields.one2many('delivery.grid', 'carrier_id', 'Advanced Pricing'), } _defaults = { 'active': 1, 'free_if_more_than': False, } def grid_get(self, cr, uid, ids, contact_id, context=None): contact = self.pool.get('res.partner').browse(cr, uid, contact_id, context=context) for carrier in self.browse(cr, uid, ids, context=context): for grid in carrier.grids_id: get_id = lambda x: x.id country_ids = map(get_id, grid.country_ids) state_ids = 
map(get_id, grid.state_ids) if country_ids and not contact.country_id.id in country_ids: continue if state_ids and not contact.state_id.id in state_ids: continue if grid.zip_from and (contact.zip or '')< grid.zip_from: continue if grid.zip_to and (contact.zip or '')> grid.zip_to: continue return grid.id return False def create_grid_lines(self, cr, uid, ids, vals, context=None): if context is None: context = {} grid_line_pool = self.pool.get('delivery.grid.line') grid_pool = self.pool.get('delivery.grid') for record in self.browse(cr, uid, ids, context=context): # if using advanced pricing per destination: do not change if record.use_detailed_pricelist: continue # not using advanced pricing per destination: override grid grid_id = grid_pool.search(cr, uid, [('carrier_id', '=', record.id)], context=context) if grid_id and not (record.normal_price or record.free_if_more_than): grid_pool.unlink(cr, uid, grid_id, context=context) # Check that float, else 0.0 is False if not (isinstance(record.normal_price,float) or record.free_if_more_than): continue if not grid_id: grid_data = { 'name': record.name, 'carrier_id': record.id, 'sequence': 10, } grid_id = [grid_pool.create(cr, uid, grid_data, context=context)] lines = grid_line_pool.search(cr, uid, [('grid_id','in',grid_id)], context=context) if lines: grid_line_pool.unlink(cr, uid, lines, context=context) #create the grid lines if record.free_if_more_than: line_data = { 'grid_id': grid_id and grid_id[0], 'name': _('Free if more than %.2f') % record.amount, 'type': 'price', 'operator': '>=', 'max_value': record.amount, 'standard_price': 0.0, 'list_price': 0.0, } grid_line_pool.create(cr, uid, line_data, context=context) if isinstance(record.normal_price,float): line_data = { 'grid_id': grid_id and grid_id[0], 'name': _('Default price'), 'type': 'price', 'operator': '>=', 'max_value': 0.0, 'standard_price': record.normal_price, 'list_price': record.normal_price, } grid_line_pool.create(cr, uid, line_data, context=context) 
return True def write(self, cr, uid, ids, vals, context=None): if isinstance(ids, (int,long)): ids = [ids] res = super(delivery_carrier, self).write(cr, uid, ids, vals, context=context) self.create_grid_lines(cr, uid, ids, vals, context=context) return res def create(self, cr, uid, vals, context=None): res_id = super(delivery_carrier, self).create(cr, uid, vals, context=context) self.create_grid_lines(cr, uid, [res_id], vals, context=context) return res_id class delivery_grid(osv.osv): _name = "delivery.grid" _description = "Delivery Grid" _columns = { 'name': fields.char('Grid Name', size=64, required=True), 'sequence': fields.integer('Sequence', size=64, required=True, help="Gives the sequence order when displaying a list of delivery grid."), 'carrier_id': fields.many2one('delivery.carrier', 'Carrier', required=True, ondelete='cascade'), 'country_ids': fields.many2many('res.country', 'delivery_grid_country_rel', 'grid_id', 'country_id', 'Countries'), 'state_ids': fields.many2many('res.country.state', 'delivery_grid_state_rel', 'grid_id', 'state_id', 'States'), 'zip_from': fields.char('Start Zip', size=12), 'zip_to': fields.char('To Zip', size=12), 'line_ids': fields.one2many('delivery.grid.line', 'grid_id', 'Grid Line'), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery grid without removing it."), } _defaults = { 'active': lambda *a: 1, 'sequence': lambda *a: 1, } _order = 'sequence' def get_price(self, cr, uid, id, order, dt, context=None): total = 0 weight = 0 volume = 0 quantity = 0 product_uom_obj = self.pool.get('product.uom') for line in order.order_line: if not line.product_id or line.is_delivery: continue q = product_uom_obj._compute_qty(cr, uid, line.product_uom.id, line.product_uos_qty, line.product_id.uom_id.id) weight += (line.product_id.weight or 0.0) * q volume += (line.product_id.volume or 0.0) * q quantity += q total = order.amount_total or 0.0 return 
self.get_price_from_picking(cr, uid, id, total,weight, volume, quantity, context=context) def get_price_from_picking(self, cr, uid, id, total, weight, volume, quantity, context=None): grid = self.browse(cr, uid, id, context=context) price = 0.0 ok = False price_dict = {'price': total, 'volume':volume, 'weight': weight, 'wv':volume*weight, 'quantity': quantity} for line in grid.line_ids: test = eval(line.type+line.operator+str(line.max_value), price_dict) if test: if line.price_type=='variable': price = line.list_price * price_dict[line.variable_factor] else: price = line.list_price ok = True break if not ok: raise osv.except_osv(_("Unable to fetch delivery method!"), _("Selected product in the delivery method doesn't fulfill any of the delivery grid(s) criteria.")) return price class delivery_grid_line(osv.osv): _name = "delivery.grid.line" _description = "Delivery Grid Line" _columns = { 'name': fields.char('Name', size=64, required=True), 'sequence': fields.integer('Sequence', size=64, required=True, help="Gives the sequence order when calculating delivery grid."), 'grid_id': fields.many2one('delivery.grid', 'Grid',required=True, ondelete='cascade'), 'type': fields.selection([('weight','Weight'),('volume','Volume'),\ ('wv','Weight * Volume'), ('price','Price'), ('quantity','Quantity')],\ 'Variable', required=True), 'operator': fields.selection([('==','='),('<=','<='),('<','<'),('>=','>='),('>','>')], 'Operator', required=True), 'max_value': fields.float('Maximum Value', required=True), 'price_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Price Type', required=True), 'variable_factor': fields.selection([('weight','Weight'),('volume','Volume'),('wv','Weight * Volume'), ('price','Price'), ('quantity','Quantity')], 'Variable Factor', required=True), 'list_price': fields.float('Sale Price', digits_compute= dp.get_precision('Product Price'), required=True), 'standard_price': fields.float('Cost Price', digits_compute= dp.get_precision('Product 
Price'), required=True), } _defaults = { 'sequence': lambda *args: 10, 'type': lambda *args: 'weight', 'operator': lambda *args: '<=', 'price_type': lambda *args: 'fixed', 'variable_factor': lambda *args: 'weight', } _order = 'sequence, list_price' # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
rkhleics/wagtailmenus
wagtailmenus/utils/tests/test_misc.py
2
10973
from django.test import RequestFactory, TestCase, modify_settings from distutils.version import LooseVersion from wagtail.core import __version__ as wagtail_version from wagtail.core.models import Page, Site from wagtailmenus.conf import defaults from wagtailmenus.utils.misc import derive_page, derive_section_root, get_site_from_request from wagtailmenus.tests.models import ( ArticleListPage, ArticlePage, LowLevelPage, TopLevelPage ) class TestDerivePage(TestCase): """Tests for wagtailmenus.utils.misc.derive_page()""" fixtures = ['test.json'] def setUp(self): # Every test needs access to the request factory. self.rf = RequestFactory() self.site = Site.objects.select_related('root_page').first() # Prefetch the specific page, so that it doesn't count # toward the counted queries self.site.root_page.specific def _run_test( self, url, expected_page, expected_num_queries, full_url_match_expected, accept_best_match=True, max_subsequent_route_failures=3 ): request = self.rf.get(url) # Set these to improve efficiency request.site = self.site request._wagtail_cached_site_root_paths = Site.get_site_root_paths() # Run tests with self.assertNumQueries(expected_num_queries): page, full_url_match = derive_page( request, self.site, accept_best_match, max_subsequent_route_failures, ) self.assertEqual(page, expected_page) self.assertIs(full_url_match, full_url_match_expected) def test_simple_full_url_match(self): """ Routing should use 4 queries here: 1. Look up 'superheroes' from site root 2. Fetch specific version of 'superheroes' 3. Look up 'marvel-comics' from 'superheroes' 4. Fetch specific version of 'marvel-comics' """ self._run_test( url='/superheroes/marvel-comics/', expected_page=LowLevelPage.objects.get(slug='marvel-comics'), expected_num_queries=4, full_url_match_expected=True, ) def test_article_list_full_url_match(self): """ Routing should use 4 queries here: 1. Look up 'news-and-events' from site root 2. Fetch specific version of 'news-and-events' 3. 
Look up 'latest-news' from 'news-and-events' 4. Fetch specific version of 'latest-news' """ self._run_test( url='/news-and-events/latest-news/2016/04/', expected_page=ArticleListPage.objects.get(slug='latest-news'), expected_num_queries=4, full_url_match_expected=True, ) def test_article_full_url_match(self): """ Routing should use 5 queries here: 1. Look up 'news-and-events' from site root 2. Fetch specific version of 'news-and-events' 3. Look up 'latest-news' from 'news-and-events' 4. Fetch specific version of 'latest-news' 5. Look up 'article-one' from 'latest-news' """ self._run_test( url='/news-and-events/latest-news/2016/04/18/article-one/', expected_page=ArticlePage.objects.get(slug='article-one'), expected_num_queries=5, full_url_match_expected=True, ) def test_simple_partial_match(self): """ Routing should use 4 queries here: 1. Look up 'about-us' from site root 2. Fetch specific version of 'about-us' 3. Attempt to look up 'blah' from 'about-us' """ self._run_test( url='/about-us/blah/', expected_page=TopLevelPage.objects.get(slug='about-us'), expected_num_queries=3, full_url_match_expected=False, ) def test_article_list_partial_match(self): """ Routing should use 4 queries here: 1. Look up 'news-and-events' from site root 2. Fetch specific version of 'news-and-events' 3. Look up 'latest-news' from 'news-and-events' 4. Fetch specific version of 'latest-news' 5. Attempt to look up 'blah' from 'latest-news' 6. Attempt to look up 'blah/blah/' from 'latest-news' """ self._run_test( url='/news-and-events/latest-news/2016/04/01/blah/blah/', expected_page=ArticleListPage.objects.get(slug='latest-news'), expected_num_queries=6, full_url_match_expected=False, ) def test_partial_match_with_max_subsequent_route_failures(self): """ Routing should use 5 queries here: 1. Look up 'about-us' from site root 2. Fetch specific version of 'about-us' 3. Attempt to look up 'blah' from 'about-us' 4. Attempt to look up 'blah/blah/' from 'about-us' 5. 
Attempt to look up 'blah/blah/blah/' from 'about-us' """ self._run_test( url='/about-us/blah/blah/blah/blah/blah', expected_page=TopLevelPage.objects.get(slug='about-us'), expected_num_queries=5, full_url_match_expected=False, ) def test_no_match(self): """ This test also shows that using the ``max_subsequent_route_failures`` option directly affects the number of route() attempts that will be made, even when """ common_test_kwargs = { 'url': '/blah/blah/blah/blah/blah', 'expected_page': None, 'full_url_match_expected': False, } for i in range(1, 3): self._run_test( expected_num_queries=i, max_subsequent_route_failures=i, **common_test_kwargs ) def test_exact_match_only_with_success(self): self._run_test( url='/about-us/', expected_page=TopLevelPage.objects.get(slug='about-us'), expected_num_queries=2, full_url_match_expected=True, accept_best_match=False ) def test_exact_match_only_without_success(self): self._run_test( url='/blah/blah/blah/blah/blah', expected_page=None, expected_num_queries=1, full_url_match_expected=False, accept_best_match=False ) class TestDeriveSectionRoot(TestCase): """Tests for wagtailmenus.utils.misc.derive_section_root()""" fixtures = ['test.json'] def setUp(self): self.page_with_depth_of_2 = Page.objects.get( depth=2, url_path='/home/' ) self.page_with_depth_of_3 = Page.objects.get( depth=3, url_path='/home/about-us/' ) self.page_with_depth_of_4 = Page.objects.get( depth=4, url_path='/home/about-us/meet-the-team/' ) self.page_with_depth_of_5 = Page.objects.get( depth=5, url_path='/home/about-us/meet-the-team/staff-member-one/' ) def test_returns_same_page_if_provided_page_is_section_root(self): # Using the default section root depth of 3 with self.assertNumQueries(1): # One query should be used to get the specific page result = derive_section_root(self.page_with_depth_of_3) # The function should return the specific version of the same page self.assertEqual(result, self.page_with_depth_of_3.specific) # Using a custom section root depth of 
4 with self.settings(WAGTAILMENUS_SECTION_ROOT_DEPTH=4): with self.assertNumQueries(1): # One query should be used to get the specific page result = derive_section_root(self.page_with_depth_of_4) # The function should return the specific version of the same page self.assertEqual(result, self.page_with_depth_of_4.specific) def test_returns_section_root_if_provided_page_is_a_descendant_of_one(self): # Using the default section root depth of 3 with self.assertNumQueries(2): # Two queries should be used to identify the page # and to get the specific version result = derive_section_root(self.page_with_depth_of_5) self.assertEqual(result.depth, defaults.SECTION_ROOT_DEPTH) self.assertIsInstance(result, TopLevelPage) # Using a custom section root depth of 4 with self.settings(WAGTAILMENUS_SECTION_ROOT_DEPTH=4): with self.assertNumQueries(2): result = derive_section_root(self.page_with_depth_of_5) self.assertEqual(result.depth, 4) self.assertIsInstance(result, LowLevelPage) def test_returns_none_if_provided_page_is_not_a_descendant_of_a_section_root(self): # Using the default section root depth of 3 with self.assertNumQueries(0): result = derive_section_root(self.page_with_depth_of_2) self.assertIs(result, None) # Using a custom section root depth of 4 with self.settings(WAGTAILMENUS_SECTION_ROOT_DEPTH=4): with self.assertNumQueries(0): result = derive_section_root(self.page_with_depth_of_3) self.assertIs(result, None) class TestGetSiteFromRequest(TestCase): """Tests for wagtailmenus.utils.misc.get_site_from_request()""" fixtures = ['test.json'] def setUp(self): # URL to request during test self.url = '/superheroes/marvel-comics/' # Establish if Wagtail is v2.9 or above if LooseVersion(wagtail_version) >= LooseVersion('2.9'): self.is_wagtail_29_or_above = True else: self.is_wagtail_29_or_above = False def _run_test(self): """ Confirm that the Site returned by get_site_from_request() is a Wagtail Site instance. 
""" request = self.client.get(self.url).wsgi_request site = get_site_from_request(request) self.assertIsInstance(site, Site) def test_with_wagtail_site_in_request(self): """ Test when Wagtail Site exists at request.site. """ self._run_test() @modify_settings(MIDDLEWARE={ 'append': 'django.contrib.sites.middleware.CurrentSiteMiddleware', 'remove': 'wagtail.core.middleware.SiteMiddleware', }) def test_with_django_site_in_request_wagtail_29_and_above(self): """ Test when only a Django Site exists at request.site for Wagtail 2.9 and above. """ if self.is_wagtail_29_or_above: self._run_test() @modify_settings(MIDDLEWARE={'remove': 'wagtail.core.middleware.SiteMiddleware'}) def test_with_no_site_in_request_wagtail_29_and_above(self): """ Test when no Site object exists at request.site for Wagtail 2.9 and above. """ if self.is_wagtail_29_or_above: self._run_test()
mit
Jionglun/2015cd_midterm2
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_case.py
738
51689
import difflib import pprint import pickle import re import sys import warnings import weakref import inspect from copy import deepcopy from test import support import unittest from .support import ( TestEquality, TestHashing, LoggingResult, ResultWithNoStartTestRunStopTestRun ) class Test(object): "Keep these TestCase classes out of the main namespace" class Foo(unittest.TestCase): def runTest(self): pass def test1(self): pass class Bar(Foo): def test2(self): pass class LoggingTestCase(unittest.TestCase): """A test case which logs its calls.""" def __init__(self, events): super(Test.LoggingTestCase, self).__init__('test') self.events = events def setUp(self): self.events.append('setUp') def test(self): self.events.append('test') def tearDown(self): self.events.append('tearDown') class Test_TestCase(unittest.TestCase, TestEquality, TestHashing): ### Set up attributes used by inherited tests ################################################################ # Used by TestHashing.test_hash and TestEquality.test_eq eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))] # Used by TestEquality.test_ne ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')), (Test.Foo('test1'), Test.Bar('test1')), (Test.Foo('test1'), Test.Bar('test2'))] ################################################################ ### /Set up attributes used by inherited tests # "class TestCase([methodName])" # ... # "Each instance of TestCase will run a single test method: the # method named methodName." # ... # "methodName defaults to "runTest"." # # Make sure it really is optional, and that it defaults to the proper # thing. 
def test_init__no_test_name(self): class Test(unittest.TestCase): def runTest(self): raise MyException() def test(self): pass self.assertEqual(Test().id()[-13:], '.Test.runTest') # test that TestCase can be instantiated with no args # primarily for use at the interactive interpreter test = unittest.TestCase() test.assertEqual(3, 3) with test.assertRaises(test.failureException): test.assertEqual(3, 2) with self.assertRaises(AttributeError): test.run() # "class TestCase([methodName])" # ... # "Each instance of TestCase will run a single test method: the # method named methodName." def test_init__test_name__valid(self): class Test(unittest.TestCase): def runTest(self): raise MyException() def test(self): pass self.assertEqual(Test('test').id()[-10:], '.Test.test') # "class TestCase([methodName])" # ... # "Each instance of TestCase will run a single test method: the # method named methodName." def test_init__test_name__invalid(self): class Test(unittest.TestCase): def runTest(self): raise MyException() def test(self): pass try: Test('testfoo') except ValueError: pass else: self.fail("Failed to raise ValueError") # "Return the number of tests represented by the this test object. For # TestCase instances, this will always be 1" def test_countTestCases(self): class Foo(unittest.TestCase): def test(self): pass self.assertEqual(Foo('test').countTestCases(), 1) # "Return the default type of test result object to be used to run this # test. For TestCase instances, this will always be # unittest.TestResult; subclasses of TestCase should # override this as necessary." def test_defaultTestResult(self): class Foo(unittest.TestCase): def runTest(self): pass result = Foo().defaultTestResult() self.assertEqual(type(result), unittest.TestResult) # "When a setUp() method is defined, the test runner will run that method # prior to each test. Likewise, if a tearDown() method is defined, the # test runner will invoke that method after each test. 
In the example, # setUp() was used to create a fresh sequence for each test." # # Make sure the proper call order is maintained, even if setUp() raises # an exception. def test_run_call_order__error_in_setUp(self): events = [] result = LoggingResult(events) class Foo(Test.LoggingTestCase): def setUp(self): super(Foo, self).setUp() raise RuntimeError('raised by Foo.setUp') Foo(events).run(result) expected = ['startTest', 'setUp', 'addError', 'stopTest'] self.assertEqual(events, expected) # "With a temporary result stopTestRun is called when setUp errors. def test_run_call_order__error_in_setUp_default_result(self): events = [] class Foo(Test.LoggingTestCase): def defaultTestResult(self): return LoggingResult(self.events) def setUp(self): super(Foo, self).setUp() raise RuntimeError('raised by Foo.setUp') Foo(events).run() expected = ['startTestRun', 'startTest', 'setUp', 'addError', 'stopTest', 'stopTestRun'] self.assertEqual(events, expected) # "When a setUp() method is defined, the test runner will run that method # prior to each test. Likewise, if a tearDown() method is defined, the # test runner will invoke that method after each test. In the example, # setUp() was used to create a fresh sequence for each test." # # Make sure the proper call order is maintained, even if the test raises # an error (as opposed to a failure). def test_run_call_order__error_in_test(self): events = [] result = LoggingResult(events) class Foo(Test.LoggingTestCase): def test(self): super(Foo, self).test() raise RuntimeError('raised by Foo.test') expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError', 'stopTest'] Foo(events).run(result) self.assertEqual(events, expected) # "With a default result, an error in the test still results in stopTestRun # being called." 
def test_run_call_order__error_in_test_default_result(self): events = [] class Foo(Test.LoggingTestCase): def defaultTestResult(self): return LoggingResult(self.events) def test(self): super(Foo, self).test() raise RuntimeError('raised by Foo.test') expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown', 'addError', 'stopTest', 'stopTestRun'] Foo(events).run() self.assertEqual(events, expected) # "When a setUp() method is defined, the test runner will run that method # prior to each test. Likewise, if a tearDown() method is defined, the # test runner will invoke that method after each test. In the example, # setUp() was used to create a fresh sequence for each test." # # Make sure the proper call order is maintained, even if the test signals # a failure (as opposed to an error). def test_run_call_order__failure_in_test(self): events = [] result = LoggingResult(events) class Foo(Test.LoggingTestCase): def test(self): super(Foo, self).test() self.fail('raised by Foo.test') expected = ['startTest', 'setUp', 'test', 'tearDown', 'addFailure', 'stopTest'] Foo(events).run(result) self.assertEqual(events, expected) # "When a test fails with a default result stopTestRun is still called." def test_run_call_order__failure_in_test_default_result(self): class Foo(Test.LoggingTestCase): def defaultTestResult(self): return LoggingResult(self.events) def test(self): super(Foo, self).test() self.fail('raised by Foo.test') expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown', 'addFailure', 'stopTest', 'stopTestRun'] events = [] Foo(events).run() self.assertEqual(events, expected) # "When a setUp() method is defined, the test runner will run that method # prior to each test. Likewise, if a tearDown() method is defined, the # test runner will invoke that method after each test. In the example, # setUp() was used to create a fresh sequence for each test." # # Make sure the proper call order is maintained, even if tearDown() raises # an exception. 
def test_run_call_order__error_in_tearDown(self): events = [] result = LoggingResult(events) class Foo(Test.LoggingTestCase): def tearDown(self): super(Foo, self).tearDown() raise RuntimeError('raised by Foo.tearDown') Foo(events).run(result) expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError', 'stopTest'] self.assertEqual(events, expected) # "When tearDown errors with a default result stopTestRun is still called." def test_run_call_order__error_in_tearDown_default_result(self): class Foo(Test.LoggingTestCase): def defaultTestResult(self): return LoggingResult(self.events) def tearDown(self): super(Foo, self).tearDown() raise RuntimeError('raised by Foo.tearDown') events = [] Foo(events).run() expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown', 'addError', 'stopTest', 'stopTestRun'] self.assertEqual(events, expected) # "TestCase.run() still works when the defaultTestResult is a TestResult # that does not support startTestRun and stopTestRun. def test_run_call_order_default_result(self): class Foo(unittest.TestCase): def defaultTestResult(self): return ResultWithNoStartTestRunStopTestRun() def test(self): pass Foo('test').run() # "This class attribute gives the exception raised by the test() method. # If a test framework needs to use a specialized exception, possibly to # carry additional information, it must subclass this exception in # order to ``play fair'' with the framework. The initial value of this # attribute is AssertionError" def test_failureException__default(self): class Foo(unittest.TestCase): def test(self): pass self.assertTrue(Foo('test').failureException is AssertionError) # "This class attribute gives the exception raised by the test() method. # If a test framework needs to use a specialized exception, possibly to # carry additional information, it must subclass this exception in # order to ``play fair'' with the framework." 
# # Make sure TestCase.run() respects the designated failureException def test_failureException__subclassing__explicit_raise(self): events = [] result = LoggingResult(events) class Foo(unittest.TestCase): def test(self): raise RuntimeError() failureException = RuntimeError self.assertTrue(Foo('test').failureException is RuntimeError) Foo('test').run(result) expected = ['startTest', 'addFailure', 'stopTest'] self.assertEqual(events, expected) # "This class attribute gives the exception raised by the test() method. # If a test framework needs to use a specialized exception, possibly to # carry additional information, it must subclass this exception in # order to ``play fair'' with the framework." # # Make sure TestCase.run() respects the designated failureException def test_failureException__subclassing__implicit_raise(self): events = [] result = LoggingResult(events) class Foo(unittest.TestCase): def test(self): self.fail("foo") failureException = RuntimeError self.assertTrue(Foo('test').failureException is RuntimeError) Foo('test').run(result) expected = ['startTest', 'addFailure', 'stopTest'] self.assertEqual(events, expected) # "The default implementation does nothing." def test_setUp(self): class Foo(unittest.TestCase): def runTest(self): pass # ... and nothing should happen Foo().setUp() # "The default implementation does nothing." def test_tearDown(self): class Foo(unittest.TestCase): def runTest(self): pass # ... and nothing should happen Foo().tearDown() # "Return a string identifying the specific test case." # # Because of the vague nature of the docs, I'm not going to lock this # test down too much. 
Really all that can be asserted is that the id() # will be a string (either 8-byte or unicode -- again, because the docs # just say "string") def test_id(self): class Foo(unittest.TestCase): def runTest(self): pass self.assertIsInstance(Foo().id(), str) # "If result is omitted or None, a temporary result object is created, # used, and is made available to the caller. As TestCase owns the # temporary result startTestRun and stopTestRun are called. def test_run__uses_defaultTestResult(self): events = [] defaultResult = LoggingResult(events) class Foo(unittest.TestCase): def test(self): events.append('test') def defaultTestResult(self): return defaultResult # Make run() find a result object on its own result = Foo('test').run() self.assertIs(result, defaultResult) expected = ['startTestRun', 'startTest', 'test', 'addSuccess', 'stopTest', 'stopTestRun'] self.assertEqual(events, expected) # "The result object is returned to run's caller" def test_run__returns_given_result(self): class Foo(unittest.TestCase): def test(self): pass result = unittest.TestResult() retval = Foo('test').run(result) self.assertIs(retval, result) # "The same effect [as method run] may be had by simply calling the # TestCase instance." 
def test_call__invoking_an_instance_delegates_to_run(self): resultIn = unittest.TestResult() resultOut = unittest.TestResult() class Foo(unittest.TestCase): def test(self): pass def run(self, result): self.assertIs(result, resultIn) return resultOut retval = Foo('test')(resultIn) self.assertIs(retval, resultOut) def testShortDescriptionWithoutDocstring(self): self.assertIsNone(self.shortDescription()) @unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def testShortDescriptionWithOneLineDocstring(self): """Tests shortDescription() for a method with a docstring.""" self.assertEqual( self.shortDescription(), 'Tests shortDescription() for a method with a docstring.') @unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def testShortDescriptionWithMultiLineDocstring(self): """Tests shortDescription() for a method with a longer docstring. This method ensures that only the first line of a docstring is returned used in the short description, no matter how long the whole thing is. """ self.assertEqual( self.shortDescription(), 'Tests shortDescription() for a method with a longer ' 'docstring.') def testAddTypeEqualityFunc(self): class SadSnake(object): """Dummy class for test_addTypeEqualityFunc.""" s1, s2 = SadSnake(), SadSnake() self.assertFalse(s1 == s2) def AllSnakesCreatedEqual(a, b, msg=None): return type(a) == type(b) == SadSnake self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual) self.assertEqual(s1, s2) # No this doesn't clean up and remove the SadSnake equality func # from this TestCase instance but since its a local nothing else # will ever notice that. 
def testAssertIs(self): thing = object() self.assertIs(thing, thing) self.assertRaises(self.failureException, self.assertIs, thing, object()) def testAssertIsNot(self): thing = object() self.assertIsNot(thing, object()) self.assertRaises(self.failureException, self.assertIsNot, thing, thing) def testAssertIsInstance(self): thing = [] self.assertIsInstance(thing, list) self.assertRaises(self.failureException, self.assertIsInstance, thing, dict) def testAssertNotIsInstance(self): thing = [] self.assertNotIsInstance(thing, dict) self.assertRaises(self.failureException, self.assertNotIsInstance, thing, list) def testAssertIn(self): animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'} self.assertIn('a', 'abc') self.assertIn(2, [1, 2, 3]) self.assertIn('monkey', animals) self.assertNotIn('d', 'abc') self.assertNotIn(0, [1, 2, 3]) self.assertNotIn('otter', animals) self.assertRaises(self.failureException, self.assertIn, 'x', 'abc') self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3]) self.assertRaises(self.failureException, self.assertIn, 'elephant', animals) self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc') self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3]) self.assertRaises(self.failureException, self.assertNotIn, 'cow', animals) def testAssertDictContainsSubset(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) self.assertDictContainsSubset({}, {}) self.assertDictContainsSubset({}, {'a': 1}) self.assertDictContainsSubset({'a': 1}, {'a': 1}) self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2}) self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2}) with self.assertRaises(self.failureException): self.assertDictContainsSubset({1: "one"}, {}) with self.assertRaises(self.failureException): self.assertDictContainsSubset({'a': 2}, {'a': 1}) with self.assertRaises(self.failureException): self.assertDictContainsSubset({'c': 1}, {'a': 1}) with 
self.assertRaises(self.failureException): self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1}) with self.assertRaises(self.failureException): self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1}) one = ''.join(chr(i) for i in range(255)) # this used to cause a UnicodeDecodeError constructing the failure msg with self.assertRaises(self.failureException): self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'}) def testAssertEqual(self): equal_pairs = [ ((), ()), ({}, {}), ([], []), (set(), set()), (frozenset(), frozenset())] for a, b in equal_pairs: # This mess of try excepts is to test the assertEqual behavior # itself. try: self.assertEqual(a, b) except self.failureException: self.fail('assertEqual(%r, %r) failed' % (a, b)) try: self.assertEqual(a, b, msg='foo') except self.failureException: self.fail('assertEqual(%r, %r) with msg= failed' % (a, b)) try: self.assertEqual(a, b, 'foo') except self.failureException: self.fail('assertEqual(%r, %r) with third parameter failed' % (a, b)) unequal_pairs = [ ((), []), ({}, set()), (set([4,1]), frozenset([4,2])), (frozenset([4,5]), set([2,3])), (set([3,4]), set([5,4]))] for a, b in unequal_pairs: self.assertRaises(self.failureException, self.assertEqual, a, b) self.assertRaises(self.failureException, self.assertEqual, a, b, 'foo') self.assertRaises(self.failureException, self.assertEqual, a, b, msg='foo') def testEquality(self): self.assertListEqual([], []) self.assertTupleEqual((), ()) self.assertSequenceEqual([], ()) a = [0, 'a', []] b = [] self.assertRaises(unittest.TestCase.failureException, self.assertListEqual, a, b) self.assertRaises(unittest.TestCase.failureException, self.assertListEqual, tuple(a), tuple(b)) self.assertRaises(unittest.TestCase.failureException, self.assertSequenceEqual, a, tuple(b)) b.extend(a) self.assertListEqual(a, b) self.assertTupleEqual(tuple(a), tuple(b)) self.assertSequenceEqual(a, tuple(b)) self.assertSequenceEqual(tuple(a), b) self.assertRaises(self.failureException, 
self.assertListEqual, a, tuple(b)) self.assertRaises(self.failureException, self.assertTupleEqual, tuple(a), b) self.assertRaises(self.failureException, self.assertListEqual, None, b) self.assertRaises(self.failureException, self.assertTupleEqual, None, tuple(b)) self.assertRaises(self.failureException, self.assertSequenceEqual, None, tuple(b)) self.assertRaises(self.failureException, self.assertListEqual, 1, 1) self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1) self.assertRaises(self.failureException, self.assertSequenceEqual, 1, 1) self.assertDictEqual({}, {}) c = { 'x': 1 } d = {} self.assertRaises(unittest.TestCase.failureException, self.assertDictEqual, c, d) d.update(c) self.assertDictEqual(c, d) d['x'] = 0 self.assertRaises(unittest.TestCase.failureException, self.assertDictEqual, c, d, 'These are unequal') self.assertRaises(self.failureException, self.assertDictEqual, None, d) self.assertRaises(self.failureException, self.assertDictEqual, [], d) self.assertRaises(self.failureException, self.assertDictEqual, 1, 1) def testAssertSequenceEqualMaxDiff(self): self.assertEqual(self.maxDiff, 80*8) seq1 = 'a' + 'x' * 80**2 seq2 = 'b' + 'x' * 80**2 diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(), pprint.pformat(seq2).splitlines())) # the +1 is the leading \n added by assertSequenceEqual omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,) self.maxDiff = len(diff)//2 try: self.assertSequenceEqual(seq1, seq2) except self.failureException as e: msg = e.args[0] else: self.fail('assertSequenceEqual did not fail.') self.assertTrue(len(msg) < len(diff)) self.assertIn(omitted, msg) self.maxDiff = len(diff) * 2 try: self.assertSequenceEqual(seq1, seq2) except self.failureException as e: msg = e.args[0] else: self.fail('assertSequenceEqual did not fail.') self.assertTrue(len(msg) > len(diff)) self.assertNotIn(omitted, msg) self.maxDiff = None try: self.assertSequenceEqual(seq1, seq2) except self.failureException as e: msg = e.args[0] 
else: self.fail('assertSequenceEqual did not fail.') self.assertTrue(len(msg) > len(diff)) self.assertNotIn(omitted, msg) def testTruncateMessage(self): self.maxDiff = 1 message = self._truncateMessage('foo', 'bar') omitted = unittest.case.DIFF_OMITTED % len('bar') self.assertEqual(message, 'foo' + omitted) self.maxDiff = None message = self._truncateMessage('foo', 'bar') self.assertEqual(message, 'foobar') self.maxDiff = 4 message = self._truncateMessage('foo', 'bar') self.assertEqual(message, 'foobar') def testAssertDictEqualTruncates(self): test = unittest.TestCase('assertEqual') def truncate(msg, diff): return 'foo' test._truncateMessage = truncate try: test.assertDictEqual({}, {1: 0}) except self.failureException as e: self.assertEqual(str(e), 'foo') else: self.fail('assertDictEqual did not fail') def testAssertMultiLineEqualTruncates(self): test = unittest.TestCase('assertEqual') def truncate(msg, diff): return 'foo' test._truncateMessage = truncate try: test.assertMultiLineEqual('foo', 'bar') except self.failureException as e: self.assertEqual(str(e), 'foo') else: self.fail('assertMultiLineEqual did not fail') def testAssertEqual_diffThreshold(self): # check threshold value self.assertEqual(self._diffThreshold, 2**16) # disable madDiff to get diff markers self.maxDiff = None # set a lower threshold value and add a cleanup to restore it old_threshold = self._diffThreshold self._diffThreshold = 2**8 self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold)) # under the threshold: diff marker (^) in error message s = 'x' * (2**7) with self.assertRaises(self.failureException) as cm: self.assertEqual(s + 'a', s + 'b') self.assertIn('^', str(cm.exception)) self.assertEqual(s + 'a', s + 'a') # over the threshold: diff not used and marker (^) not in error message s = 'x' * (2**9) # if the path that uses difflib is taken, _truncateMessage will be # called -- replace it with explodingTruncation to verify that this # doesn't happen def 
explodingTruncation(message, diff): raise SystemError('this should not be raised') old_truncate = self._truncateMessage self._truncateMessage = explodingTruncation self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate)) s1, s2 = s + 'a', s + 'b' with self.assertRaises(self.failureException) as cm: self.assertEqual(s1, s2) self.assertNotIn('^', str(cm.exception)) self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2)) self.assertEqual(s + 'a', s + 'a') def testAssertCountEqual(self): a = object() self.assertCountEqual([1, 2, 3], [3, 2, 1]) self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo']) self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2)) self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"]) self.assertRaises(self.failureException, self.assertCountEqual, [1, 2] + [3] * 100, [1] * 100 + [2, 3]) self.assertRaises(self.failureException, self.assertCountEqual, [1, "2", "a", "a"], ["a", "2", True, 1]) self.assertRaises(self.failureException, self.assertCountEqual, [10], [10, 11]) self.assertRaises(self.failureException, self.assertCountEqual, [10, 11], [10]) self.assertRaises(self.failureException, self.assertCountEqual, [10, 11, 10], [10, 11]) # Test that sequences of unhashable objects can be tested for sameness: self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]]) # Test that iterator of unhashable objects can be tested for sameness: self.assertCountEqual(iter([1, 2, [], 3, 4]), iter([1, 2, [], 3, 4])) # hashable types, but not orderable self.assertRaises(self.failureException, self.assertCountEqual, [], [divmod, 'x', 1, 5j, 2j, frozenset()]) # comparing dicts self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}]) # comparing heterogenous non-hashable sequences self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1]) self.assertRaises(self.failureException, self.assertCountEqual, [], [divmod, [], 'x', 1, 5j, 2j, set()]) self.assertRaises(self.failureException, 
self.assertCountEqual, [[1]], [[2]]) # Same elements, but not same sequence length self.assertRaises(self.failureException, self.assertCountEqual, [1, 1, 2], [2, 1]) self.assertRaises(self.failureException, self.assertCountEqual, [1, 1, "2", "a", "a"], ["2", "2", True, "a"]) self.assertRaises(self.failureException, self.assertCountEqual, [1, {'b': 2}, None, True], [{'b': 2}, True, None]) # Same elements which don't reliably compare, in # different order, see issue 10242 a = [{2,4}, {1,2}] b = a[::-1] self.assertCountEqual(a, b) # test utility functions supporting assertCountEqual() diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce')) expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')} self.assertEqual(diffs, expected) diffs = unittest.util._count_diff_all_purpose([[]], []) self.assertEqual(diffs, [(1, 0, [])]) diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce')) expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')} self.assertEqual(diffs, expected) def testAssertSetEqual(self): set1 = set() set2 = set() self.assertSetEqual(set1, set2) self.assertRaises(self.failureException, self.assertSetEqual, None, set2) self.assertRaises(self.failureException, self.assertSetEqual, [], set2) self.assertRaises(self.failureException, self.assertSetEqual, set1, None) self.assertRaises(self.failureException, self.assertSetEqual, set1, []) set1 = set(['a']) set2 = set() self.assertRaises(self.failureException, self.assertSetEqual, set1, set2) set1 = set(['a']) set2 = set(['a']) self.assertSetEqual(set1, set2) set1 = set(['a']) set2 = set(['a', 'b']) self.assertRaises(self.failureException, self.assertSetEqual, set1, set2) set1 = set(['a']) set2 = frozenset(['a', 'b']) self.assertRaises(self.failureException, self.assertSetEqual, set1, set2) set1 = set(['a', 'b']) set2 = frozenset(['a', 'b']) self.assertSetEqual(set1, set2) set1 = set() set2 = "foo" self.assertRaises(self.failureException, self.assertSetEqual, set1, set2) 
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1) # make sure any string formatting is tuple-safe set1 = set([(0, 1), (2, 3)]) set2 = set([(4, 5)]) self.assertRaises(self.failureException, self.assertSetEqual, set1, set2) def testInequality(self): # Try ints self.assertGreater(2, 1) self.assertGreaterEqual(2, 1) self.assertGreaterEqual(1, 1) self.assertLess(1, 2) self.assertLessEqual(1, 2) self.assertLessEqual(1, 1) self.assertRaises(self.failureException, self.assertGreater, 1, 2) self.assertRaises(self.failureException, self.assertGreater, 1, 1) self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2) self.assertRaises(self.failureException, self.assertLess, 2, 1) self.assertRaises(self.failureException, self.assertLess, 1, 1) self.assertRaises(self.failureException, self.assertLessEqual, 2, 1) # Try Floats self.assertGreater(1.1, 1.0) self.assertGreaterEqual(1.1, 1.0) self.assertGreaterEqual(1.0, 1.0) self.assertLess(1.0, 1.1) self.assertLessEqual(1.0, 1.1) self.assertLessEqual(1.0, 1.0) self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1) self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0) self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1) self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0) self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0) self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0) # Try Strings self.assertGreater('bug', 'ant') self.assertGreaterEqual('bug', 'ant') self.assertGreaterEqual('ant', 'ant') self.assertLess('ant', 'bug') self.assertLessEqual('ant', 'bug') self.assertLessEqual('ant', 'ant') self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug') self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant') self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug') self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant') 
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant') self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant') # Try bytes self.assertGreater(b'bug', b'ant') self.assertGreaterEqual(b'bug', b'ant') self.assertGreaterEqual(b'ant', b'ant') self.assertLess(b'ant', b'bug') self.assertLessEqual(b'ant', b'bug') self.assertLessEqual(b'ant', b'ant') self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug') self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant') self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant', b'bug') self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant') self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant') self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant') def testAssertMultiLineEqual(self): sample_text = """\ http://www.python.org/doc/2.3/lib/module-unittest.html test case A test case is the smallest unit of testing. [...] """ revised_sample_text = """\ http://www.python.org/doc/2.4.1/lib/module-unittest.html test case A test case is the smallest unit of testing. [...] You may provide your own implementation that does not subclass from TestCase, of course. """ sample_text_error = """\ - http://www.python.org/doc/2.3/lib/module-unittest.html ? ^ + http://www.python.org/doc/2.4.1/lib/module-unittest.html ? ^^^ test case - A test case is the smallest unit of testing. [...] + A test case is the smallest unit of testing. [...] You may provide your ? +++++++++++++++++++++ + own implementation that does not subclass from TestCase, of course. """ self.maxDiff = None try: self.assertMultiLineEqual(sample_text, revised_sample_text) except self.failureException as e: # need to remove the first line of the error message error = str(e).split('\n', 1)[1] # no fair testing ourself with ourself, and assertEqual is used for strings # so can't use assertEqual either. Just use assertTrue. 
self.assertTrue(sample_text_error == error) def testAsertEqualSingleLine(self): sample_text = "laden swallows fly slowly" revised_sample_text = "unladen swallows fly quickly" sample_text_error = """\ - laden swallows fly slowly ? ^^^^ + unladen swallows fly quickly ? ++ ^^^^^ """ try: self.assertEqual(sample_text, revised_sample_text) except self.failureException as e: error = str(e).split('\n', 1)[1] self.assertTrue(sample_text_error == error) def testAssertIsNone(self): self.assertIsNone(None) self.assertRaises(self.failureException, self.assertIsNone, False) self.assertIsNotNone('DjZoPloGears on Rails') self.assertRaises(self.failureException, self.assertIsNotNone, None) def testAssertRegex(self): self.assertRegex('asdfabasdf', r'ab+') self.assertRaises(self.failureException, self.assertRegex, 'saaas', r'aaaa') def testAssertRaisesRegex(self): class ExceptionMock(Exception): pass def Stub(): raise ExceptionMock('We expect') self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub) self.assertRaisesRegex(ExceptionMock, 'expect$', Stub) def testAssertNotRaisesRegex(self): self.assertRaisesRegex( self.failureException, '^Exception not raised by <lambda>$', self.assertRaisesRegex, Exception, re.compile('x'), lambda: None) self.assertRaisesRegex( self.failureException, '^Exception not raised by <lambda>$', self.assertRaisesRegex, Exception, 'x', lambda: None) def testAssertRaisesRegexMismatch(self): def Stub(): raise Exception('Unexpected') self.assertRaisesRegex( self.failureException, r'"\^Expected\$" does not match "Unexpected"', self.assertRaisesRegex, Exception, '^Expected$', Stub) self.assertRaisesRegex( self.failureException, r'"\^Expected\$" does not match "Unexpected"', self.assertRaisesRegex, Exception, re.compile('^Expected$'), Stub) def testAssertRaisesExcValue(self): class ExceptionMock(Exception): pass def Stub(foo): raise ExceptionMock(foo) v = "particular value" ctx = self.assertRaises(ExceptionMock) with ctx: Stub(v) e = ctx.exception 
self.assertIsInstance(e, ExceptionMock) self.assertEqual(e.args[0], v) def testAssertWarnsCallable(self): def _runtime_warn(): warnings.warn("foo", RuntimeWarning) # Success when the right warning is triggered, even several times self.assertWarns(RuntimeWarning, _runtime_warn) self.assertWarns(RuntimeWarning, _runtime_warn) # A tuple of warning classes is accepted self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn) # *args and **kwargs also work self.assertWarns(RuntimeWarning, warnings.warn, "foo", category=RuntimeWarning) # Failure when no warning is triggered with self.assertRaises(self.failureException): self.assertWarns(RuntimeWarning, lambda: 0) # Failure when another warning is triggered with warnings.catch_warnings(): # Force default filter (in case tests are run with -We) warnings.simplefilter("default", RuntimeWarning) with self.assertRaises(self.failureException): self.assertWarns(DeprecationWarning, _runtime_warn) # Filters for other warnings are not modified with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) with self.assertRaises(RuntimeWarning): self.assertWarns(DeprecationWarning, _runtime_warn) def testAssertWarnsContext(self): # Believe it or not, it is preferrable to duplicate all tests above, # to make sure the __warningregistry__ $@ is circumvented correctly. 
def _runtime_warn(): warnings.warn("foo", RuntimeWarning) _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1] with self.assertWarns(RuntimeWarning) as cm: _runtime_warn() # A tuple of warning classes is accepted with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm: _runtime_warn() # The context manager exposes various useful attributes self.assertIsInstance(cm.warning, RuntimeWarning) self.assertEqual(cm.warning.args[0], "foo") self.assertIn("test_case.py", cm.filename) self.assertEqual(cm.lineno, _runtime_warn_lineno + 1) # Same with several warnings with self.assertWarns(RuntimeWarning): _runtime_warn() _runtime_warn() with self.assertWarns(RuntimeWarning): warnings.warn("foo", category=RuntimeWarning) # Failure when no warning is triggered with self.assertRaises(self.failureException): with self.assertWarns(RuntimeWarning): pass # Failure when another warning is triggered with warnings.catch_warnings(): # Force default filter (in case tests are run with -We) warnings.simplefilter("default", RuntimeWarning) with self.assertRaises(self.failureException): with self.assertWarns(DeprecationWarning): _runtime_warn() # Filters for other warnings are not modified with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) with self.assertRaises(RuntimeWarning): with self.assertWarns(DeprecationWarning): _runtime_warn() def testAssertWarnsRegexCallable(self): def _runtime_warn(msg): warnings.warn(msg, RuntimeWarning) self.assertWarnsRegex(RuntimeWarning, "o+", _runtime_warn, "foox") # Failure when no warning is triggered with self.assertRaises(self.failureException): self.assertWarnsRegex(RuntimeWarning, "o+", lambda: 0) # Failure when another warning is triggered with warnings.catch_warnings(): # Force default filter (in case tests are run with -We) warnings.simplefilter("default", RuntimeWarning) with self.assertRaises(self.failureException): self.assertWarnsRegex(DeprecationWarning, "o+", _runtime_warn, "foox") # Failure 
when message doesn't match with self.assertRaises(self.failureException): self.assertWarnsRegex(RuntimeWarning, "o+", _runtime_warn, "barz") # A little trickier: we ask RuntimeWarnings to be raised, and then # check for some of them. It is implementation-defined whether # non-matching RuntimeWarnings are simply re-raised, or produce a # failureException. with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) with self.assertRaises((RuntimeWarning, self.failureException)): self.assertWarnsRegex(RuntimeWarning, "o+", _runtime_warn, "barz") def testAssertWarnsRegexContext(self): # Same as above, but with assertWarnsRegex as a context manager def _runtime_warn(msg): warnings.warn(msg, RuntimeWarning) _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1] with self.assertWarnsRegex(RuntimeWarning, "o+") as cm: _runtime_warn("foox") self.assertIsInstance(cm.warning, RuntimeWarning) self.assertEqual(cm.warning.args[0], "foox") self.assertIn("test_case.py", cm.filename) self.assertEqual(cm.lineno, _runtime_warn_lineno + 1) # Failure when no warning is triggered with self.assertRaises(self.failureException): with self.assertWarnsRegex(RuntimeWarning, "o+"): pass # Failure when another warning is triggered with warnings.catch_warnings(): # Force default filter (in case tests are run with -We) warnings.simplefilter("default", RuntimeWarning) with self.assertRaises(self.failureException): with self.assertWarnsRegex(DeprecationWarning, "o+"): _runtime_warn("foox") # Failure when message doesn't match with self.assertRaises(self.failureException): with self.assertWarnsRegex(RuntimeWarning, "o+"): _runtime_warn("barz") # A little trickier: we ask RuntimeWarnings to be raised, and then # check for some of them. It is implementation-defined whether # non-matching RuntimeWarnings are simply re-raised, or produce a # failureException. 
with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) with self.assertRaises((RuntimeWarning, self.failureException)): with self.assertWarnsRegex(RuntimeWarning, "o+"): _runtime_warn("barz") def testDeprecatedMethodNames(self): """ Test that the deprecated methods raise a DeprecationWarning. See #9424. """ old = ( (self.failIfEqual, (3, 5)), (self.assertNotEquals, (3, 5)), (self.failUnlessEqual, (3, 3)), (self.assertEquals, (3, 3)), (self.failUnlessAlmostEqual, (2.0, 2.0)), (self.assertAlmostEquals, (2.0, 2.0)), (self.failIfAlmostEqual, (3.0, 5.0)), (self.assertNotAlmostEquals, (3.0, 5.0)), (self.failUnless, (True,)), (self.assert_, (True,)), (self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')), (self.failIf, (False,)), (self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))), (self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])), (self.assertRegexpMatches, ('bar', 'bar')), ) for meth, args in old: with self.assertWarns(DeprecationWarning): meth(*args) # disable this test for now. 
When the version where the fail* methods will # be removed is decided, re-enable it and update the version def _testDeprecatedFailMethods(self): """Test that the deprecated fail* methods get removed in 3.x""" if sys.version_info[:2] < (3, 3): return deprecated_names = [ 'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual', 'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf', 'assertDictContainsSubset', ] for deprecated_name in deprecated_names: with self.assertRaises(AttributeError): getattr(self, deprecated_name) # remove these in 3.x def testDeepcopy(self): # Issue: 5660 class TestableTest(unittest.TestCase): def testNothing(self): pass test = TestableTest('testNothing') # This shouldn't blow up deepcopy(test) def testPickle(self): # Issue 10326 # Can't use TestCase classes defined in Test class as # pickle does not work with inner classes test = unittest.TestCase('run') for protocol in range(pickle.HIGHEST_PROTOCOL + 1): # blew up prior to fix pickled_test = pickle.dumps(test, protocol=protocol) unpickled_test = pickle.loads(pickled_test) self.assertEqual(test, unpickled_test) # exercise the TestCase instance in a way that will invoke # the type equality lookup mechanism unpickled_test.assertEqual(set(), set()) def testKeyboardInterrupt(self): def _raise(self=None): raise KeyboardInterrupt def nothing(self): pass class Test1(unittest.TestCase): test_something = _raise class Test2(unittest.TestCase): setUp = _raise test_something = nothing class Test3(unittest.TestCase): test_something = nothing tearDown = _raise class Test4(unittest.TestCase): def test_something(self): self.addCleanup(_raise) for klass in (Test1, Test2, Test3, Test4): with self.assertRaises(KeyboardInterrupt): klass('test_something').run() def testSkippingEverywhere(self): def _skip(self=None): raise unittest.SkipTest('some reason') def nothing(self): pass class Test1(unittest.TestCase): test_something = _skip class Test2(unittest.TestCase): setUp = _skip test_something = 
nothing class Test3(unittest.TestCase): test_something = nothing tearDown = _skip class Test4(unittest.TestCase): def test_something(self): self.addCleanup(_skip) for klass in (Test1, Test2, Test3, Test4): result = unittest.TestResult() klass('test_something').run(result) self.assertEqual(len(result.skipped), 1) self.assertEqual(result.testsRun, 1) def testSystemExit(self): def _raise(self=None): raise SystemExit def nothing(self): pass class Test1(unittest.TestCase): test_something = _raise class Test2(unittest.TestCase): setUp = _raise test_something = nothing class Test3(unittest.TestCase): test_something = nothing tearDown = _raise class Test4(unittest.TestCase): def test_something(self): self.addCleanup(_raise) for klass in (Test1, Test2, Test3, Test4): result = unittest.TestResult() klass('test_something').run(result) self.assertEqual(len(result.errors), 1) self.assertEqual(result.testsRun, 1) @support.cpython_only def testNoCycles(self): case = unittest.TestCase() wr = weakref.ref(case) with support.disable_gc(): del case self.assertFalse(wr())
agpl-3.0
luca76/QGIS
python/pyplugin_installer/qgsplugininstallerpluginerrordialog.py
9
1761
# -*- coding:utf-8 -*- """ /*************************************************************************** qgsplugininstallerpluginerrordialog.py Plugin Installer module ------------------- Date : June 2013 Copyright : (C) 2013 by Borys Jurgiel Email : info at borysjurgiel dot pl This module is based on former plugin_installer plugin: Copyright (C) 2007-2008 Matthew Perry Copyright (C) 2008-2013 Borys Jurgiel ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from PyQt4.QtGui import * from ui_qgsplugininstallerpluginerrorbase import Ui_QgsPluginInstallerPluginErrorDialogBase class QgsPluginInstallerPluginErrorDialog(QDialog, Ui_QgsPluginInstallerPluginErrorDialogBase): # ----------------------------------------- # def __init__(self, parent, errorMessage): QDialog.__init__(self, parent) self.setupUi(self) if not errorMessage: errorMessage = self.tr("no error message received") self.textBrowser.setText(errorMessage)
gpl-2.0
shtouff/django
django/contrib/auth/views.py
75
12175
import warnings from django.conf import settings # Avoid shadowing the login() and logout() views below. from django.contrib.auth import ( REDIRECT_FIELD_NAME, get_user_model, login as auth_login, logout as auth_logout, update_session_auth_hash, ) from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import ( AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm, ) from django.contrib.auth.tokens import default_token_generator from django.contrib.sites.shortcuts import get_current_site from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect, QueryDict from django.shortcuts import resolve_url from django.template.response import TemplateResponse from django.utils.deprecation import RemovedInDjango20Warning from django.utils.encoding import force_text from django.utils.http import is_safe_url, urlsafe_base64_decode from django.utils.six.moves.urllib.parse import urlparse, urlunparse from django.utils.translation import ugettext as _ from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_protect from django.views.decorators.debug import sensitive_post_parameters @sensitive_post_parameters() @csrf_protect @never_cache def login(request, template_name='registration/login.html', redirect_field_name=REDIRECT_FIELD_NAME, authentication_form=AuthenticationForm, current_app=None, extra_context=None): """ Displays the login form and handles the login action. """ redirect_to = request.POST.get(redirect_field_name, request.GET.get(redirect_field_name, '')) if request.method == "POST": form = authentication_form(request, data=request.POST) if form.is_valid(): # Ensure the user-originating redirection url is safe. if not is_safe_url(url=redirect_to, host=request.get_host()): redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL) # Okay, security check complete. Log the user in. 
auth_login(request, form.get_user()) return HttpResponseRedirect(redirect_to) else: form = authentication_form(request) current_site = get_current_site(request) context = { 'form': form, redirect_field_name: redirect_to, 'site': current_site, 'site_name': current_site.name, } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context) def logout(request, next_page=None, template_name='registration/logged_out.html', redirect_field_name=REDIRECT_FIELD_NAME, current_app=None, extra_context=None): """ Logs out the user and displays 'You are logged out' message. """ auth_logout(request) if next_page is not None: next_page = resolve_url(next_page) if (redirect_field_name in request.POST or redirect_field_name in request.GET): next_page = request.POST.get(redirect_field_name, request.GET.get(redirect_field_name)) # Security check -- don't allow redirection to a different host. if not is_safe_url(url=next_page, host=request.get_host()): next_page = request.path if next_page: # Redirect to this page until the session has been cleared. return HttpResponseRedirect(next_page) current_site = get_current_site(request) context = { 'site': current_site, 'site_name': current_site.name, 'title': _('Logged out') } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context) def logout_then_login(request, login_url=None, current_app=None, extra_context=None): """ Logs out the user if they are logged in. Then redirects to the log-in page. 
""" if not login_url: login_url = settings.LOGIN_URL login_url = resolve_url(login_url) return logout(request, login_url, current_app=current_app, extra_context=extra_context) def redirect_to_login(next, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME): """ Redirects the user to the login page, passing the given 'next' page """ resolved_url = resolve_url(login_url or settings.LOGIN_URL) login_url_parts = list(urlparse(resolved_url)) if redirect_field_name: querystring = QueryDict(login_url_parts[4], mutable=True) querystring[redirect_field_name] = next login_url_parts[4] = querystring.urlencode(safe='/') return HttpResponseRedirect(urlunparse(login_url_parts)) # 4 views for password reset: # - password_reset sends the mail # - password_reset_done shows a success message for the above # - password_reset_confirm checks the link the user clicked and # prompts for a new password # - password_reset_complete shows a success message for the above @csrf_protect def password_reset(request, is_admin_site=False, template_name='registration/password_reset_form.html', email_template_name='registration/password_reset_email.html', subject_template_name='registration/password_reset_subject.txt', password_reset_form=PasswordResetForm, token_generator=default_token_generator, post_reset_redirect=None, from_email=None, current_app=None, extra_context=None, html_email_template_name=None): if post_reset_redirect is None: post_reset_redirect = reverse('password_reset_done') else: post_reset_redirect = resolve_url(post_reset_redirect) if request.method == "POST": form = password_reset_form(request.POST) if form.is_valid(): opts = { 'use_https': request.is_secure(), 'token_generator': token_generator, 'from_email': from_email, 'email_template_name': email_template_name, 'subject_template_name': subject_template_name, 'request': request, 'html_email_template_name': html_email_template_name, } if is_admin_site: warnings.warn( "The is_admin_site argument to " 
"django.contrib.auth.views.password_reset() is deprecated " "and will be removed in Django 2.0.", RemovedInDjango20Warning, 3 ) opts = dict(opts, domain_override=request.get_host()) form.save(**opts) return HttpResponseRedirect(post_reset_redirect) else: form = password_reset_form() context = { 'form': form, 'title': _('Password reset'), } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context) def password_reset_done(request, template_name='registration/password_reset_done.html', current_app=None, extra_context=None): context = { 'title': _('Password reset sent'), } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context) # Doesn't need csrf_protect since no-one can guess the URL @sensitive_post_parameters() @never_cache def password_reset_confirm(request, uidb64=None, token=None, template_name='registration/password_reset_confirm.html', token_generator=default_token_generator, set_password_form=SetPasswordForm, post_reset_redirect=None, current_app=None, extra_context=None): """ View that checks the hash in a password reset link and presents a form for entering a new password. 
""" UserModel = get_user_model() assert uidb64 is not None and token is not None # checked by URLconf if post_reset_redirect is None: post_reset_redirect = reverse('password_reset_complete') else: post_reset_redirect = resolve_url(post_reset_redirect) try: # urlsafe_base64_decode() decodes to bytestring on Python 3 uid = force_text(urlsafe_base64_decode(uidb64)) user = UserModel._default_manager.get(pk=uid) except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist): user = None if user is not None and token_generator.check_token(user, token): validlink = True title = _('Enter new password') if request.method == 'POST': form = set_password_form(user, request.POST) if form.is_valid(): form.save() return HttpResponseRedirect(post_reset_redirect) else: form = set_password_form(user) else: validlink = False form = None title = _('Password reset unsuccessful') context = { 'form': form, 'title': title, 'validlink': validlink, } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context) def password_reset_complete(request, template_name='registration/password_reset_complete.html', current_app=None, extra_context=None): context = { 'login_url': resolve_url(settings.LOGIN_URL), 'title': _('Password reset complete'), } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context) @sensitive_post_parameters() @csrf_protect @login_required def password_change(request, template_name='registration/password_change_form.html', post_change_redirect=None, password_change_form=PasswordChangeForm, current_app=None, extra_context=None): if post_change_redirect is None: post_change_redirect = reverse('password_change_done') else: post_change_redirect = resolve_url(post_change_redirect) if request.method == "POST": form = 
password_change_form(user=request.user, data=request.POST) if form.is_valid(): form.save() # Updating the password logs out all other sessions for the user # except the current one if # django.contrib.auth.middleware.SessionAuthenticationMiddleware # is enabled. update_session_auth_hash(request, form.user) return HttpResponseRedirect(post_change_redirect) else: form = password_change_form(user=request.user) context = { 'form': form, 'title': _('Password change'), } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context) @login_required def password_change_done(request, template_name='registration/password_change_done.html', current_app=None, extra_context=None): context = { 'title': _('Password change successful'), } if extra_context is not None: context.update(extra_context) if current_app is not None: request.current_app = current_app return TemplateResponse(request, template_name, context)
bsd-3-clause
tangyibin/goblin-core
riscv/llvm/3.5/llvm-3.5.0.src/test/CodeGen/SystemZ/Large/branch-range-04.py
13
3672
# Test 64-bit COMPARE AND BRANCH in cases where the sheer number of # instructions causes some branches to be out of range. # RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s # Construct: # # before0: # conditional branch to after0 # ... # beforeN: # conditional branch to after0 # main: # 0xffcc bytes, from MVIY instructions # conditional branch to main # after0: # ... # conditional branch to main # afterN: # # Each conditional branch sequence occupies 12 bytes if it uses a short # branch and 16 if it uses a long one. The ones before "main:" have to # take the branch length into account, which is 6 for short branches, # so the final (0x34 - 6) / 12 == 3 blocks can use short branches. # The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks # can use short branches. The conservative algorithm we use makes # one of the forward branches unnecessarily long, as noted in the # check output below. # # CHECK: lgb [[REG:%r[0-5]]], 0(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL:\.L[^ ]*]] # CHECK: lgb [[REG:%r[0-5]]], 1(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 2(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 3(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 4(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # ...as mentioned above, the next one could be a CGRJE instead... # CHECK: lgb [[REG:%r[0-5]]], 5(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 6(%r3) # CHECK: cgrje %r4, [[REG]], [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 7(%r3) # CHECK: cgrje %r4, [[REG]], [[LABEL]] # ...main goes here... 
# CHECK: lgb [[REG:%r[0-5]]], 25(%r3) # CHECK: cgrje %r4, [[REG]], [[LABEL:\.L[^ ]*]] # CHECK: lgb [[REG:%r[0-5]]], 26(%r3) # CHECK: cgrje %r4, [[REG]], [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 27(%r3) # CHECK: cgrje %r4, [[REG]], [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 28(%r3) # CHECK: cgrje %r4, [[REG]], [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 29(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 30(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 31(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] # CHECK: lgb [[REG:%r[0-5]]], 32(%r3) # CHECK: cgr %r4, [[REG]] # CHECK: jge [[LABEL]] branch_blocks = 8 main_size = 0xffcc print 'define void @f1(i8 *%base, i8 *%stop, i64 %limit) {' print 'entry:' print ' br label %before0' print '' for i in xrange(branch_blocks): next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main' print 'before%d:' % i print ' %%bstop%d = getelementptr i8 *%%stop, i64 %d' % (i, i) print ' %%bcur%d = load i8 *%%bstop%d' % (i, i) print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i) print ' %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i) print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next) print '' print '%s:' % next a, b = 1, 1 for i in xrange(0, main_size, 6): a, b = b, a + b offset = 4096 + b % 500000 value = a % 256 print ' %%ptr%d = getelementptr i8 *%%base, i64 %d' % (i, offset) print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i) for i in xrange(branch_blocks): print ' %%astop%d = getelementptr i8 *%%stop, i64 %d' % (i, i + 25) print ' %%acur%d = load i8 *%%astop%d' % (i, i) print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i) print ' %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i) print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i) print '' print 'after%d:' % i print ' ret void' print '}'
bsd-3-clause
christophreimer/pytesmo
tests/test_sat/test_ers.py
1
4902
# Copyright (c) 2013,Vienna University of Technology, # Department of Geodesy and Geoinformation # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the Vienna University of Technology, Department of # Geodesy and Geoinformation nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY, # DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''
Created on Nov 7, 2013

@author: Christoph Paulik christoph.paulik@geo.tuwien.ac.at
'''

import os
import unittest

from pytesmo.io.sat import ers

from datetime import datetime
import numpy as np


class TestERSNetCDF(unittest.TestCase):
    # Integration tests for the netCDF-backed ERS surface-soil-moisture
    # reader; they read from the bundled test-data directory, resolved
    # relative to this file.

    def setUp(self):
        """Create an ERS_SSM reader pointed at the bundled test data."""
        # Time-series data for cell 55R11 of the ERS product.
        self.ers_folder = os.path.join(os.path.dirname(__file__),
                                       '..', 'test-data', 'sat',
                                       'ers', '55R11')
        # The grid definition is shared with the ASCAT netCDF test data.
        self.ers_grid_folder = os.path.join(os.path.dirname(__file__),
                                            '..', 'test-data', 'sat',
                                            'ascat', 'netcdf', 'grid')
        # init the ERS_SSM reader with the paths
        self.ers_SSM_reader = ers.ERS_SSM(self.ers_folder,
                                          self.ers_grid_folder)

    def test_read_ssm(self):
        """Read one grid point and pin metadata, columns and sample values."""
        gpi = 2329253
        result = self.ers_SSM_reader.read_ssm(gpi, absolute_values=True)
        assert result.gpi == gpi
        np.testing.assert_approx_equal(
            result.longitude, 14.28413, significant=4)
        np.testing.assert_approx_equal(
            result.latitude, 45.698074, significant=4)
        assert list(result.data.columns) == ['orbit_dir', 'proc_flag',
                                             'sm', 'sm_noise',
                                             'sm_por_gldas',
                                             'sm_noise_por_gldas',
                                             'sm_por_hwsd',
                                             'sm_noise_por_hwsd',
                                             'frozen_prob', 'snow_prob']
        assert len(result.data) == 478
        # Spot-check a single observation (row 15) of the time series.
        assert result.data.ix[15].name == datetime(1992, 1, 27, 21, 11, 42, 55)
        assert result.data.ix[15]['sm'] == 57
        assert result.data.ix[15]['sm_noise'] == 7
        assert result.data.ix[15]['frozen_prob'] == 18
        assert result.data.ix[15]['snow_prob'] == 0
        assert result.data.ix[15]['orbit_dir'].decode('utf-8') == 'A'
        assert result.data.ix[15]['proc_flag'] == 0
        np.testing.assert_approx_equal(
            result.data.ix[15]['sm_por_gldas'], 0.3090667, significant=6)
        np.testing.assert_approx_equal(
            result.data.ix[15]['sm_noise_por_gldas'], 0.03795555,
            significant=6)
        np.testing.assert_approx_equal(
            result.data.ix[15]['sm_por_hwsd'], 0.2452333, significant=6)
        np.testing.assert_approx_equal(
            result.data.ix[15]['sm_noise_por_hwsd'], 0.03011637,
            significant=6)
        # Static per-gpi attributes delivered alongside the series.
        assert result.topo_complex == 14
        assert result.wetland_frac == 0
        np.testing.assert_approx_equal(
            result.porosity_gldas, 0.54222, significant=5)
        np.testing.assert_approx_equal(
            result.porosity_hwsd, 0.430234, significant=5)

    def test_neighbor_search(self):
        """Nearest-neighbour lookup on the reader's grid."""
        self.ers_SSM_reader._load_grid_info()
        gpi, distance = self.ers_SSM_reader.grid.find_nearest_gpi(3.25, 46.13)
        assert gpi == 2346869
        # presumably distance is in metres -- TODO confirm against the
        # grid implementation
        np.testing.assert_approx_equal(distance, 2267.42, significant=2)


if __name__ == '__main__':
    unittest.main()
bsd-3-clause
holmes/intellij-community
python/lib/Lib/site-packages/django/contrib/admin/views/decorators.py
78
1458
try:
    from functools import wraps
except ImportError:
    from django.utils.functional import wraps  # Python 2.4 fallback.

from django import template
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME


def staff_member_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a staff
    member, displaying the login page if necessary.
    """
    def _checklogin(request, *args, **kwargs):
        user = request.user
        if not (user.is_active and user.is_staff):
            # Not an active staff member: render the admin login form,
            # round-tripping the current path so a successful login
            # returns here.
            assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
            extra_context = {
                'title': _('Log in'),
                'app_path': request.get_full_path(),
                REDIRECT_FIELD_NAME: request.get_full_path(),
            }
            return login(request,
                         template_name='admin/login.html',
                         authentication_form=AdminAuthenticationForm,
                         extra_context=extra_context)
        # The user is valid. Continue to the admin page.
        return view_func(request, *args, **kwargs)
    return wraps(view_func)(_checklogin)
apache-2.0
sonuyos/couchpotato
couchpotato/core/notifications/plex/client.py
81
2238
import json

from couchpotato import CPLog
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
import requests

log = CPLog(__name__)


class PlexClientProtocol(object):
    """Base class for per-protocol Plex client notifiers.

    Each instance registers its notify() on the 'notify.plex.notifyClient'
    event; subclasses implement notify() for the protocol they speak.
    """

    def __init__(self, plex):
        self.plex = plex
        addEvent('notify.plex.notifyClient', self.notify)

    def notify(self, client, message):
        raise NotImplementedError()


class PlexClientHTTP(PlexClientProtocol):
    """Notify clients through the legacy XBMC HTTP API."""

    def request(self, command, client):
        # Fire-and-forget GET against the client's xbmcCmds endpoint;
        # only success/failure is reported back to the caller.
        target = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
            client['address'],
            client['port'],
            tryUrlencode(command)
        )

        try:
            self.plex.urlopen(target, headers = {}, timeout = 3, show_error = False)
        except Exception as err:
            log.error("Couldn't sent command to Plex: %s", err)
            return False

        return True

    def notify(self, client, message):
        # Only handle clients speaking the legacy HTTP protocol.
        if client.get('protocol') != 'xbmchttp':
            return None

        return self.request({
            'command': 'ExecBuiltIn',
            'parameter': 'Notification(CouchPotato, %s)' % message
        }, client)


class PlexClientJSON(PlexClientProtocol):
    """Notify clients through the JSON-RPC API (xbmcjson / plex)."""

    def request(self, method, params, client):
        log.debug('sendJSON("%s", %s, %s)', (method, params, client))

        endpoint = 'http://%s:%s/jsonrpc' % (
            client['address'],
            client['port']
        )
        payload = json.dumps({
            'id': 1,
            'jsonrpc': '2.0',
            'method': method,
            'params': params
        })

        try:
            requests.post(endpoint,
                          headers = {'Content-Type': 'application/json'},
                          timeout = 3,
                          data = payload)
        except Exception as err:
            log.error("Couldn't sent command to Plex: %s", err)
            return False

        return True

    def notify(self, client, message):
        # The JSON-RPC transport serves both the xbmcjson and plex protocols.
        if client.get('protocol') not in ('xbmcjson', 'plex'):
            return None

        return self.request('GUI.ShowNotification', {
            'title': 'CouchPotato',
            'message': message
        }, client)
gpl-3.0
jimmystar/pyymal
build/lib.macosx-10.8-intel-2.7/yaml/parser.py
409
25542
# The following YAML grammar is LL(1) and is parsed by a recursive descent # parser. # # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END # implicit_document ::= block_node DOCUMENT-END* # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* # block_node_or_indentless_sequence ::= # ALIAS # | properties (block_content | indentless_block_sequence)? # | block_content # | indentless_block_sequence # block_node ::= ALIAS # | properties block_content? # | block_content # flow_node ::= ALIAS # | properties flow_content? # | flow_content # properties ::= TAG ANCHOR? | ANCHOR TAG? # block_content ::= block_collection | flow_collection | SCALAR # flow_content ::= flow_collection | SCALAR # block_collection ::= block_sequence | block_mapping # flow_collection ::= flow_sequence | flow_mapping # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ # block_mapping ::= BLOCK-MAPPING_START # ((KEY block_node_or_indentless_sequence?)? # (VALUE block_node_or_indentless_sequence?)?)* # BLOCK-END # flow_sequence ::= FLOW-SEQUENCE-START # (flow_sequence_entry FLOW-ENTRY)* # flow_sequence_entry? # FLOW-SEQUENCE-END # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? # flow_mapping ::= FLOW-MAPPING-START # (flow_mapping_entry FLOW-ENTRY)* # flow_mapping_entry? # FLOW-MAPPING-END # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
# # FIRST sets: # # stream: { STREAM-START } # explicit_document: { DIRECTIVE DOCUMENT-START } # implicit_document: FIRST(block_node) # block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } # flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } # block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } # flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } # block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } # block_sequence: { BLOCK-SEQUENCE-START } # block_mapping: { BLOCK-MAPPING-START } # block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } # indentless_sequence: { ENTRY } # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } # flow_sequence: { FLOW-SEQUENCE-START } # flow_mapping: { FLOW-MAPPING-START } # flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } # flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } __all__ = ['Parser', 'ParserError'] from error import MarkedYAMLError from tokens import * from events import * from scanner import * class ParserError(MarkedYAMLError): pass class Parser(object): # Since writing a recursive-descendant parser is a straightforward task, we # do not give many comments here. DEFAULT_TAGS = { u'!': u'!', u'!!': u'tag:yaml.org,2002:', } def __init__(self): self.current_event = None self.yaml_version = None self.tag_handles = {} self.states = [] self.marks = [] self.state = self.parse_stream_start def dispose(self): # Reset the state attributes (to clear self-references) self.states = [] self.state = None def check_event(self, *choices): # Check the type of the next event. 
if self.current_event is None: if self.state: self.current_event = self.state() if self.current_event is not None: if not choices: return True for choice in choices: if isinstance(self.current_event, choice): return True return False def peek_event(self): # Get the next event. if self.current_event is None: if self.state: self.current_event = self.state() return self.current_event def get_event(self): # Get the next event and proceed further. if self.current_event is None: if self.state: self.current_event = self.state() value = self.current_event self.current_event = None return value # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END # implicit_document ::= block_node DOCUMENT-END* # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* def parse_stream_start(self): # Parse the stream start. token = self.get_token() event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding) # Prepare the next state. self.state = self.parse_implicit_document_start return event def parse_implicit_document_start(self): # Parse an implicit document. if not self.check_token(DirectiveToken, DocumentStartToken, StreamEndToken): self.tag_handles = self.DEFAULT_TAGS token = self.peek_token() start_mark = end_mark = token.start_mark event = DocumentStartEvent(start_mark, end_mark, explicit=False) # Prepare the next state. self.states.append(self.parse_document_end) self.state = self.parse_block_node return event else: return self.parse_document_start() def parse_document_start(self): # Parse any extra document end indicators. while self.check_token(DocumentEndToken): self.get_token() # Parse an explicit document. 
if not self.check_token(StreamEndToken): token = self.peek_token() start_mark = token.start_mark version, tags = self.process_directives() if not self.check_token(DocumentStartToken): raise ParserError(None, None, "expected '<document start>', but found %r" % self.peek_token().id, self.peek_token().start_mark) token = self.get_token() end_mark = token.end_mark event = DocumentStartEvent(start_mark, end_mark, explicit=True, version=version, tags=tags) self.states.append(self.parse_document_end) self.state = self.parse_document_content else: # Parse the end of the stream. token = self.get_token() event = StreamEndEvent(token.start_mark, token.end_mark) assert not self.states assert not self.marks self.state = None return event def parse_document_end(self): # Parse the document end. token = self.peek_token() start_mark = end_mark = token.start_mark explicit = False if self.check_token(DocumentEndToken): token = self.get_token() end_mark = token.end_mark explicit = True event = DocumentEndEvent(start_mark, end_mark, explicit=explicit) # Prepare the next state. 
self.state = self.parse_document_start return event def parse_document_content(self): if self.check_token(DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken): event = self.process_empty_scalar(self.peek_token().start_mark) self.state = self.states.pop() return event else: return self.parse_block_node() def process_directives(self): self.yaml_version = None self.tag_handles = {} while self.check_token(DirectiveToken): token = self.get_token() if token.name == u'YAML': if self.yaml_version is not None: raise ParserError(None, None, "found duplicate YAML directive", token.start_mark) major, minor = token.value if major != 1: raise ParserError(None, None, "found incompatible YAML document (version 1.* is required)", token.start_mark) self.yaml_version = token.value elif token.name == u'TAG': handle, prefix = token.value if handle in self.tag_handles: raise ParserError(None, None, "duplicate tag handle %r" % handle.encode('utf-8'), token.start_mark) self.tag_handles[handle] = prefix if self.tag_handles: value = self.yaml_version, self.tag_handles.copy() else: value = self.yaml_version, None for key in self.DEFAULT_TAGS: if key not in self.tag_handles: self.tag_handles[key] = self.DEFAULT_TAGS[key] return value # block_node_or_indentless_sequence ::= ALIAS # | properties (block_content | indentless_block_sequence)? # | block_content # | indentless_block_sequence # block_node ::= ALIAS # | properties block_content? # | block_content # flow_node ::= ALIAS # | properties flow_content? # | flow_content # properties ::= TAG ANCHOR? | ANCHOR TAG? 
# block_content ::= block_collection | flow_collection | SCALAR # flow_content ::= flow_collection | SCALAR # block_collection ::= block_sequence | block_mapping # flow_collection ::= flow_sequence | flow_mapping def parse_block_node(self): return self.parse_node(block=True) def parse_flow_node(self): return self.parse_node() def parse_block_node_or_indentless_sequence(self): return self.parse_node(block=True, indentless_sequence=True) def parse_node(self, block=False, indentless_sequence=False): if self.check_token(AliasToken): token = self.get_token() event = AliasEvent(token.value, token.start_mark, token.end_mark) self.state = self.states.pop() else: anchor = None tag = None start_mark = end_mark = tag_mark = None if self.check_token(AnchorToken): token = self.get_token() start_mark = token.start_mark end_mark = token.end_mark anchor = token.value if self.check_token(TagToken): token = self.get_token() tag_mark = token.start_mark end_mark = token.end_mark tag = token.value elif self.check_token(TagToken): token = self.get_token() start_mark = tag_mark = token.start_mark end_mark = token.end_mark tag = token.value if self.check_token(AnchorToken): token = self.get_token() end_mark = token.end_mark anchor = token.value if tag is not None: handle, suffix = tag if handle is not None: if handle not in self.tag_handles: raise ParserError("while parsing a node", start_mark, "found undefined tag handle %r" % handle.encode('utf-8'), tag_mark) tag = self.tag_handles[handle]+suffix else: tag = suffix #if tag == u'!': # raise ParserError("while parsing a node", start_mark, # "found non-specific tag '!'", tag_mark, # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") if start_mark is None: start_mark = end_mark = self.peek_token().start_mark event = None implicit = (tag is None or tag == u'!') if indentless_sequence and self.check_token(BlockEntryToken): end_mark = self.peek_token().end_mark event = SequenceStartEvent(anchor, tag, implicit, 
start_mark, end_mark) self.state = self.parse_indentless_sequence_entry else: if self.check_token(ScalarToken): token = self.get_token() end_mark = token.end_mark if (token.plain and tag is None) or tag == u'!': implicit = (True, False) elif tag is None: implicit = (False, True) else: implicit = (False, False) event = ScalarEvent(anchor, tag, implicit, token.value, start_mark, end_mark, style=token.style) self.state = self.states.pop() elif self.check_token(FlowSequenceStartToken): end_mark = self.peek_token().end_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=True) self.state = self.parse_flow_sequence_first_entry elif self.check_token(FlowMappingStartToken): end_mark = self.peek_token().end_mark event = MappingStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=True) self.state = self.parse_flow_mapping_first_key elif block and self.check_token(BlockSequenceStartToken): end_mark = self.peek_token().start_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=False) self.state = self.parse_block_sequence_first_entry elif block and self.check_token(BlockMappingStartToken): end_mark = self.peek_token().start_mark event = MappingStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=False) self.state = self.parse_block_mapping_first_key elif anchor is not None or tag is not None: # Empty scalars are allowed even if a tag or an anchor is # specified. 
event = ScalarEvent(anchor, tag, (implicit, False), u'', start_mark, end_mark) self.state = self.states.pop() else: if block: node = 'block' else: node = 'flow' token = self.peek_token() raise ParserError("while parsing a %s node" % node, start_mark, "expected the node content, but found %r" % token.id, token.start_mark) return event # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END def parse_block_sequence_first_entry(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_block_sequence_entry() def parse_block_sequence_entry(self): if self.check_token(BlockEntryToken): token = self.get_token() if not self.check_token(BlockEntryToken, BlockEndToken): self.states.append(self.parse_block_sequence_entry) return self.parse_block_node() else: self.state = self.parse_block_sequence_entry return self.process_empty_scalar(token.end_mark) if not self.check_token(BlockEndToken): token = self.peek_token() raise ParserError("while parsing a block collection", self.marks[-1], "expected <block end>, but found %r" % token.id, token.start_mark) token = self.get_token() event = SequenceEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ def parse_indentless_sequence_entry(self): if self.check_token(BlockEntryToken): token = self.get_token() if not self.check_token(BlockEntryToken, KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_indentless_sequence_entry) return self.parse_block_node() else: self.state = self.parse_indentless_sequence_entry return self.process_empty_scalar(token.end_mark) token = self.peek_token() event = SequenceEndEvent(token.start_mark, token.start_mark) self.state = self.states.pop() return event # block_mapping ::= BLOCK-MAPPING_START # ((KEY block_node_or_indentless_sequence?)? 
# (VALUE block_node_or_indentless_sequence?)?)* # BLOCK-END def parse_block_mapping_first_key(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_block_mapping_key() def parse_block_mapping_key(self): if self.check_token(KeyToken): token = self.get_token() if not self.check_token(KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_block_mapping_value) return self.parse_block_node_or_indentless_sequence() else: self.state = self.parse_block_mapping_value return self.process_empty_scalar(token.end_mark) if not self.check_token(BlockEndToken): token = self.peek_token() raise ParserError("while parsing a block mapping", self.marks[-1], "expected <block end>, but found %r" % token.id, token.start_mark) token = self.get_token() event = MappingEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_block_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_block_mapping_key) return self.parse_block_node_or_indentless_sequence() else: self.state = self.parse_block_mapping_key return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_block_mapping_key token = self.peek_token() return self.process_empty_scalar(token.start_mark) # flow_sequence ::= FLOW-SEQUENCE-START # (flow_sequence_entry FLOW-ENTRY)* # flow_sequence_entry? # FLOW-SEQUENCE-END # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? # # Note that while production rules for both flow_sequence_entry and # flow_mapping_entry are equal, their interpretations are different. # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` # generate an inline mapping (set syntax). 
def parse_flow_sequence_first_entry(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_flow_sequence_entry(first=True) def parse_flow_sequence_entry(self, first=False): if not self.check_token(FlowSequenceEndToken): if not first: if self.check_token(FlowEntryToken): self.get_token() else: token = self.peek_token() raise ParserError("while parsing a flow sequence", self.marks[-1], "expected ',' or ']', but got %r" % token.id, token.start_mark) if self.check_token(KeyToken): token = self.peek_token() event = MappingStartEvent(None, None, True, token.start_mark, token.end_mark, flow_style=True) self.state = self.parse_flow_sequence_entry_mapping_key return event elif not self.check_token(FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry) return self.parse_flow_node() token = self.get_token() event = SequenceEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_flow_sequence_entry_mapping_key(self): token = self.get_token() if not self.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry_mapping_value) return self.parse_flow_node() else: self.state = self.parse_flow_sequence_entry_mapping_value return self.process_empty_scalar(token.end_mark) def parse_flow_sequence_entry_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(FlowEntryToken, FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry_mapping_end) return self.parse_flow_node() else: self.state = self.parse_flow_sequence_entry_mapping_end return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_flow_sequence_entry_mapping_end token = self.peek_token() return self.process_empty_scalar(token.start_mark) def parse_flow_sequence_entry_mapping_end(self): self.state = self.parse_flow_sequence_entry token = self.peek_token() return 
MappingEndEvent(token.start_mark, token.start_mark) # flow_mapping ::= FLOW-MAPPING-START # (flow_mapping_entry FLOW-ENTRY)* # flow_mapping_entry? # FLOW-MAPPING-END # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? def parse_flow_mapping_first_key(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_flow_mapping_key(first=True) def parse_flow_mapping_key(self, first=False): if not self.check_token(FlowMappingEndToken): if not first: if self.check_token(FlowEntryToken): self.get_token() else: token = self.peek_token() raise ParserError("while parsing a flow mapping", self.marks[-1], "expected ',' or '}', but got %r" % token.id, token.start_mark) if self.check_token(KeyToken): token = self.get_token() if not self.check_token(ValueToken, FlowEntryToken, FlowMappingEndToken): self.states.append(self.parse_flow_mapping_value) return self.parse_flow_node() else: self.state = self.parse_flow_mapping_value return self.process_empty_scalar(token.end_mark) elif not self.check_token(FlowMappingEndToken): self.states.append(self.parse_flow_mapping_empty_value) return self.parse_flow_node() token = self.get_token() event = MappingEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_flow_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(FlowEntryToken, FlowMappingEndToken): self.states.append(self.parse_flow_mapping_key) return self.parse_flow_node() else: self.state = self.parse_flow_mapping_key return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_flow_mapping_key token = self.peek_token() return self.process_empty_scalar(token.start_mark) def parse_flow_mapping_empty_value(self): self.state = self.parse_flow_mapping_key return self.process_empty_scalar(self.peek_token().start_mark) def process_empty_scalar(self, mark): return ScalarEvent(None, None, (True, False), u'', mark, mark)
mit
LokiCoder/Sick-Beard
lib/cherrypy/lib/auth.py
68
3224
import cherrypy from cherrypy.lib import httpauth def check_auth(users, encrypt=None, realm=None): """If an authorization header contains credentials, return True or False. """ request = cherrypy.serving.request if 'authorization' in request.headers: # make sure the provided credentials are correctly set ah = httpauth.parseAuthorization(request.headers['authorization']) if ah is None: raise cherrypy.HTTPError(400, 'Bad Request') if not encrypt: encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5] if hasattr(users, '__call__'): try: # backward compatibility users = users() # expect it to return a dictionary if not isinstance(users, dict): raise ValueError( "Authentication users must be a dictionary") # fetch the user password password = users.get(ah["username"], None) except TypeError: # returns a password (encrypted or clear text) password = users(ah["username"]) else: if not isinstance(users, dict): raise ValueError("Authentication users must be a dictionary") # fetch the user password password = users.get(ah["username"], None) # validate the authorization by re-computing it here # and compare it with what the user-agent provided if httpauth.checkResponse(ah, password, method=request.method, encrypt=encrypt, realm=realm): request.login = ah["username"] return True request.login = False return False def basic_auth(realm, users, encrypt=None, debug=False): """If auth fails, raise 401 with a basic authentication header. realm A string containing the authentication realm. users A dict of the form: {username: password} or a callable returning a dict. encrypt callable used to encrypt the password returned from the user-agent. if None it defaults to a md5 encryption. 
""" if check_auth(users, encrypt): if debug: cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH') return # inform the user-agent this path is protected cherrypy.serving.response.headers[ 'www-authenticate'] = httpauth.basicAuth(realm) raise cherrypy.HTTPError( 401, "You are not authorized to access that resource") def digest_auth(realm, users, debug=False): """If auth fails, raise 401 with a digest authentication header. realm A string containing the authentication realm. users A dict of the form: {username: password} or a callable returning a dict. """ if check_auth(users, realm=realm): if debug: cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH') return # inform the user-agent this path is protected cherrypy.serving.response.headers[ 'www-authenticate'] = httpauth.digestAuth(realm) raise cherrypy.HTTPError( 401, "You are not authorized to access that resource")
gpl-3.0
neumerance/deploy
.venv/lib/python2.7/site-packages/django/contrib/auth/tests/custom_user.py
100
4528
from django.db import models from django.contrib.auth.models import ( BaseUserManager, AbstractBaseUser, AbstractUser, UserManager, PermissionsMixin ) # The custom User uses email as the unique identifier, and requires # that every user provide a date of birth. This lets us test # changes in username datatype, and non-text required fields. class CustomUserManager(BaseUserManager): def create_user(self, email, date_of_birth, password=None): """ Creates and saves a User with the given email and password. """ if not email: raise ValueError('Users must have an email address') user = self.model( email=CustomUserManager.normalize_email(email), date_of_birth=date_of_birth, ) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, password, date_of_birth): u = self.create_user(email, password=password, date_of_birth=date_of_birth) u.is_admin = True u.save(using=self._db) return u class CustomUser(AbstractBaseUser): email = models.EmailField(verbose_name='email address', max_length=255, unique=True) is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) date_of_birth = models.DateField() custom_objects = CustomUserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['date_of_birth'] class Meta: app_label = 'auth' def get_full_name(self): return self.email def get_short_name(self): return self.email def __unicode__(self): return self.email # Maybe required? def get_group_permissions(self, obj=None): return set() def get_all_permissions(self, obj=None): return set() def has_perm(self, perm, obj=None): return True def has_perms(self, perm_list, obj=None): return True def has_module_perms(self, app_label): return True # Admin required fields @property def is_staff(self): return self.is_admin # The extension user is a simple extension of the built-in user class, # adding a required date_of_birth field. This allows us to check for # any hard references to the name "User" in forms/handlers etc. 
class ExtensionUser(AbstractUser): date_of_birth = models.DateField() custom_objects = UserManager() REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth'] class Meta: app_label = 'auth' # The CustomPermissionsUser users email as the identifier, but uses the normal # Django permissions model. This allows us to check that the PermissionsMixin # includes everything that is needed to interact with the ModelBackend. class CustomPermissionsUserManager(CustomUserManager): def create_superuser(self, email, password, date_of_birth): u = self.create_user(email, password=password, date_of_birth=date_of_birth) u.is_superuser = True u.save(using=self._db) return u class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin): email = models.EmailField(verbose_name='email address', max_length=255, unique=True) date_of_birth = models.DateField() custom_objects = CustomPermissionsUserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['date_of_birth'] class Meta: app_label = 'auth' def get_full_name(self): return self.email def get_short_name(self): return self.email def __unicode__(self): return self.email class IsActiveTestUser1(AbstractBaseUser): """ This test user class and derivatives test the default is_active behavior """ username = models.CharField(max_length=30, unique=True) custom_objects = BaseUserManager() USERNAME_FIELD = 'username' class Meta: app_label = 'auth' # the is_active attr is provided by AbstractBaseUser class CustomUserNonUniqueUsername(AbstractBaseUser): "A user with a non-unique username" username = models.CharField(max_length=30) USERNAME_FIELD = 'username' class Meta: app_label = 'auth' class CustomUserBadRequiredFields(AbstractBaseUser): "A user with a non-unique username" username = models.CharField(max_length=30, unique=True) date_of_birth = models.DateField() USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['username', 'date_of_birth'] class Meta: app_label = 'auth'
apache-2.0
Alwnikrotikz/pyglet
contrib/wydget/wydget/widgets/music.py
29
4961
import os from pyglet.gl import * from pyglet.window import mouse from pyglet import media, clock from wydget import element, event, util, data, layouts from wydget.widgets.frame import Frame from wydget.widgets.label import Image, Label from wydget.widgets.button import Button class Music(Frame): name='music' def __init__(self, parent, file=None, source=None, title=None, playing=False, bgcolor=(1, 1, 1, 1), color=(0, 0, 0, 1), font_size=20, **kw): '''Pass in a filename as "file" or a pyglet Source as "source". ''' self.parent = parent if file is not None: source = media.load(file, streaming=True) else: assert source is not None, 'one of file or source is required' self.player = media.Player() # poke at the audio format if not source.audio_format: raise ValueError("File doesn't contain audio") super(Music, self).__init__(parent, bgcolor=bgcolor, **kw) # lay it out # control frame top-level c = self.control = Frame(self, width='100%', height=64) ft = Frame(c, is_transparent=True, width='100%', height='100%') ft.layout = layouts.Vertical(ft) Label(ft, title or 'unknown', color=color, bgcolor=bgcolor, padding=2, font_size=font_size) # controls underlay f = Frame(ft, is_transparent=True, width='100%', height='100%') f.layout = layouts.Horizontal(f, valign='center', halign='center', padding=10) c.play = Image(f, data.load_gui_image('media-play.png'), classes=('-play-button',), is_visible=not playing) c.pause = Image(f, data.load_gui_image('media-pause.png'), bgcolor=None, classes=('-pause-button',), is_visible=playing) fi = Frame(f, is_transparent=True) c.range = Image(fi, data.load_gui_image('media-range.png')) c.position = Image(fi, data.load_gui_image('media-position.png'), y=-2, classes=('-position',)) c.time = Label(f, '00:00', font_size=20) c.anim = None # make sure we get at least one frame to display self.player.queue(source) clock.schedule(self.update) self.playing = False if playing: self.play() def update(self, dt): self.player.dispatch_events() def 
pause(self): if not self.playing: return clock.unschedule(self.time_update) self.player.pause() self.control.pause.setVisible(False) self.control.play.setVisible(True) self.playing = False def play(self): if self.playing: return clock.schedule(self.time_update) self.player.play() self.control.pause.setVisible(True) self.control.play.setVisible(False) self.playing = True def on_eos(self): self.player.seek(0) self.pause() self.control.time.text = '00:00' self.getGUI().dispatch_event(self, 'on_eos', self) def time_update(self, ts): if not self.control.isVisible(): return t = self.player.time # time display s = int(t) m = t // 60 h = m // 60 m %= 60 s = s % 60 if h: text = '%d:%02d:%02d'%(h, m, s) else: text = '%02d:%02d'%(m, s) if text != self.control.time.text: self.control.time.text = text # slider position p = (t/self.player.source.duration) self.control.position.x = int(p * self.control.range.width) def delete(self): self.pause() super(Music, self).delete() @event.default('music .-play-button') def on_click(widget, x, y, buttons, modifiers, click_count): if not buttons & mouse.LEFT: return event.EVENT_UNHANDLED widget.getParent('music').play() return event.EVENT_HANDLED @event.default('music .-pause-button') def on_click(widget, x, y, buttons, modifiers, click_count): if not buttons & mouse.LEFT: return event.EVENT_UNHANDLED widget.getParent('music').pause() return event.EVENT_HANDLED @event.default('music .-position') def on_mouse_press(widget, x, y, buttons, modifiers): if not buttons & mouse.LEFT: return event.EVENT_UNHANDLED widget.getParent('music').pause() return event.EVENT_HANDLED @event.default('music .-position') def on_mouse_release(widget, x, y, buttons, modifiers): if not buttons & mouse.LEFT: return event.EVENT_UNHANDLED widget.getParent('music').play() return event.EVENT_HANDLED @event.default('music .-position') def on_drag(widget, x, y, dx, dy, buttons, modifiers): if not buttons & mouse.LEFT: return event.EVENT_UNHANDLED music = 
widget.getParent('music') rw = music.control.range.width widget.x = max(0, min(rw, widget.x + dx)) p = float(widget.x) / rw music.player.seek(p * music.player.source.duration) return event.EVENT_HANDLED
bsd-3-clause
andrewromanenco/pyjvm
pyjvm/platform/java/lang/object.py
1
3672
# PyJVM (pyjvm.org) Java Virtual Machine implemented in pure Python # Copyright (C) 2014 Andrew Romanenco (andrew@romanenco.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. '''See natives.txt in documentation''' from pyjvm.jvmo import JArray from pyjvm.thread import SkipThreadCycle def java_lang_Object_getClass___Ljava_lang_Class_(frame, args): assert len(args) > 0 assert type(args[0]) is tuple assert args[0][0] == "ref" and args[0][1] > 0 o = frame.vm.heap[args[0][1]] klass = o.java_class ref = frame.vm.get_class_class(klass) frame.stack.append(ref) def java_lang_Object_hashCode___I(frame, args): assert type(args[0]) is tuple frame.stack.append(args[0][1]) # address in heap is object's hash def java_lang_Object_wait__J_V(frame, args): ref = args[0] waiting_time = args[1] assert ref is not None # NPE o = frame.vm.heap[ref[1]] assert o is not None t = frame.thread if t.is_notified: t.waiting_notify = False if "@monitor" in o.fields: frame.stack.append(ref) frame.stack.append(waiting_time) raise SkipThreadCycle() else: o.waiting_list.remove(t) o.fields["@monitor"] = t o.fields["@monitor_count"] = t.monitor_count_cache t.is_notified = False return if t.waiting_notify: if t.sleep_until > 0: now = int(time.time()) * 1000 if now <= t.sleep_until: if "@monitor" in o.fields: frame.stack.append(ref) frame.stack.append(waiting_time) raise SkipThreadCycle() else: o.waiting_list.remove(t) 
o.fields["@monitor"] = t o.fields["@monitor_count"] = t.monitor_count_cache t.is_notified = False t.waiting_notify = False return frame.stack.append(ref) frame.stack.append(waiting_time) raise SkipThreadCycle() else: assert "@monitor" in o.fields assert o.fields["@monitor"] == frame.thread o.waiting_list.append(t) t.waiting_notify = True if waiting_time[1] > 0: now = int(time.time()) * 1000 t.sleep_until = now + waiting_time[1] t.monitor_count_cache = o.fields["@monitor_count"] del o.fields["@monitor"] del o.fields["@monitor_count"] frame.stack.append(ref) frame.stack.append(waiting_time) raise SkipThreadCycle() def java_lang_Object_clone___Ljava_lang_Object_(frame, args): # TODO NPE o = frame.vm.heap[args[0][1]] if o.java_class.is_array: clone = JArray(o.java_class, frame.vm) clone.values = o.values[:] ref = frame.vm.add_to_heap(clone) frame.stack.append(ref) else: clone = o.java_class.get_instance(frame.vm) clone.fields = o.fields.copy() ref = frame.vm.add_to_heap(clone) frame.stack.append(ref)
gpl-3.0
karih/Flask-HTTPAuth
tests/test_basic_get_password.py
2
2280
import unittest import base64 from flask import Flask from flask_httpauth import HTTPBasicAuth class HTTPAuthTestCase(unittest.TestCase): def setUp(self): app = Flask(__name__) app.config['SECRET_KEY'] = 'my secret' basic_auth = HTTPBasicAuth() @basic_auth.get_password def get_basic_password(username): if username == 'john': return 'hello' elif username == 'susan': return 'bye' else: return None @app.route('/') def index(): return 'index' @app.route('/basic') @basic_auth.login_required def basic_auth_route(): return 'basic_auth:' + basic_auth.username() self.app = app self.basic_auth = basic_auth self.client = app.test_client() def test_no_auth(self): response = self.client.get('/') self.assertEqual(response.data.decode('utf-8'), 'index') def test_basic_auth_prompt(self): response = self.client.get('/basic') self.assertEqual(response.status_code, 401) self.assertTrue('WWW-Authenticate' in response.headers) self.assertEqual(response.headers['WWW-Authenticate'], 'Basic realm="Authentication Required"') def test_basic_auth_ignore_options(self): response = self.client.options('/basic') self.assertEqual(response.status_code, 200) self.assertTrue('WWW-Authenticate' not in response.headers) def test_basic_auth_login_valid(self): creds = base64.b64encode(b'john:hello').decode('utf-8') response = self.client.get( '/basic', headers={'Authorization': 'Basic ' + creds}) self.assertEqual(response.data.decode('utf-8'), 'basic_auth:john') def test_basic_auth_login_invalid(self): creds = base64.b64encode(b'john:bye').decode('utf-8') response = self.client.get( '/basic', headers={'Authorization': 'Basic ' + creds}) self.assertEqual(response.status_code, 401) self.assertTrue('WWW-Authenticate' in response.headers) self.assertEqual(response.headers['WWW-Authenticate'], 'Basic realm="Authentication Required"')
mit
credativUK/vdirsyncer
tests/cli/test_config.py
1
4939
import io from textwrap import dedent import pytest from vdirsyncer import cli @pytest.fixture def read_config(tmpdir): def inner(cfg): f = io.StringIO(dedent(cfg.format(base=str(tmpdir)))) return cli.utils.read_config(f) return inner def test_read_config(read_config, monkeypatch): errors = [] monkeypatch.setattr('vdirsyncer.cli.cli_logger.error', errors.append) general, pairs, storages = read_config(u''' [general] status_path = /tmp/status/ [pair bob] a = bob_a b = bob_b foo = bar bam = true [storage bob_a] type = filesystem path = /tmp/contacts/ fileext = .vcf yesno = false number = 42 [storage bob_b] type = carddav [bogus] lol = true ''') assert general == {'status_path': '/tmp/status/'} assert pairs == {'bob': ('bob_a', 'bob_b', {'bam': True, 'foo': 'bar'})} assert storages == { 'bob_a': {'type': 'filesystem', 'path': '/tmp/contacts/', 'fileext': '.vcf', 'yesno': False, 'number': 42, 'instance_name': 'bob_a'}, 'bob_b': {'type': 'carddav', 'instance_name': 'bob_b'} } assert len(errors) == 1 assert errors[0].startswith('Unknown section') assert 'bogus' in errors[0] def test_storage_instance_from_config(monkeypatch): def lol(**kw): assert kw == {'foo': 'bar', 'baz': 1} return 'OK' import vdirsyncer.storage monkeypatch.setitem(vdirsyncer.cli.utils.storage_names._storages, 'lol', lol) config = {'type': 'lol', 'foo': 'bar', 'baz': 1} assert cli.utils.storage_instance_from_config(config) == 'OK' def test_parse_pairs_args(): pairs = { 'foo': ('bar', 'baz', {'conflict_resolution': 'a wins'}, {'storage_option': True}), 'one': ('two', 'three', {'collections': 'a,b,c'}, {}), 'eins': ('zwei', 'drei', {'ha': True}, {}) } assert sorted( cli.parse_pairs_args(['foo/foocoll', 'one', 'eins'], pairs) ) == [ ('eins', set()), ('foo', {'foocoll'}), ('one', set()), ] def test_missing_general_section(read_config): with pytest.raises(cli.CliError) as excinfo: read_config(u''' [pair my_pair] a = my_a b = my_b [storage my_a] type = filesystem path = {base}/path_a/ fileext = .txt [storage 
my_b] type = filesystem path = {base}/path_b/ fileext = .txt ''') assert 'Invalid general section.' in excinfo.value.msg def test_wrong_general_section(read_config): with pytest.raises(cli.CliError) as excinfo: read_config(u''' [general] wrong = true ''') assert 'Invalid general section.' in excinfo.value.msg assert excinfo.value.problems == [ 'general section doesn\'t take the parameters: wrong', 'general section is missing the parameters: status_path' ] def test_invalid_storage_name(): f = io.StringIO(dedent(u''' [general] status_path = {base}/status/ [storage foo.bar] ''')) with pytest.raises(cli.CliError) as excinfo: cli.utils.read_config(f) assert 'invalid characters' in str(excinfo.value).lower() def test_parse_config_value(capsys): invalid = object() def x(s): try: rv = cli.utils.parse_config_value(s) except ValueError: return invalid else: warnings = capsys.readouterr()[1] return rv, len(warnings.splitlines()) assert x('123 # comment!') is invalid assert x('True') == ('True', 1) assert x('False') == ('False', 1) assert x('Yes') == ('Yes', 1) assert x('None') == ('None', 1) assert x('"True"') == ('True', 0) assert x('"False"') == ('False', 0) assert x('"123 # comment!"') == ('123 # comment!', 0) assert x('true') == (True, 0) assert x('false') == (False, 0) assert x('null') == (None, 0) assert x('3.14') == (3.14, 0) assert x('') == ('', 0) assert x('""') == ('', 0) def test_invalid_collections_arg(): f = io.StringIO(dedent(u''' [general] status_path = /tmp/status/ [pair foobar] a = foo b = bar collections = [null] [storage foo] type = filesystem path = /tmp/foo/ fileext = .txt [storage bar] type = filesystem path = /tmp/bar/ fileext = .txt ''')) with pytest.raises(cli.utils.CliError) as excinfo: cli.utils.read_config(f) assert ( 'Section `pair foobar`: `collections` parameter must be a list of ' 'collection names (strings!) or `null`.' ) in str(excinfo.value)
mit
tpoy0099/pyctp
example/pyctp2/test/test_position.py
7
28891
#-*- coding:utf-8 -*- import logging import time import unittest import unittest.mock as mock from ..common.base import ( XOPEN,XCLOSE,XCLOSE_TODAY, LONG,SHORT, BaseObject, TEST_PATH, MAX_VALUE, ) from ..common.contract_type import ContractInfo as ci,M,P from ..common.cjson import dumps,loads from ..trader.strategy import POPEN,PCLOSE from ..trader.position import ( Order, Position, PosInfo, PositionHolder, Closer,ContractCloser,GlobalCloser, LongCloserParameter1, ORDER_STATUS, ) class OrderTestCase(unittest.TestCase): def setUp(self): self._contract = ci('m1501',1501,M) self._pos = Position(self._contract,LONG) def tearDown(self): pass def test_base(self): order = Order(self._pos,XOPEN,3500,1,1,30) order.approve(10,350000) order.trigger_time = 100 #测试通道 self.assertEqual(ORDER_STATUS.INITIALIZED,order._state) self.assertEqual(10,order.approved) self.assertEqual(self._contract._name,order.contract_name) self.assertEqual(self._contract,order.contract) self.assertEqual([],order._details) def test_calc_margin(self): order = Order(self._pos,XOPEN,3500,1,1,30) order.target_price = 3530 self._pos.contract.long_marginrate = 0.1 self._pos.contract.multiple = 50 self.assertTrue(abs(17650-order.calc_margin()) < 0.001) def test_accomplished(self): order = Order(self._pos,XOPEN,3500,1,1,30) order._accomplished = 1 self.assertEqual(1,order.true_accomplished) order._accomplished2 = 2 self.assertEqual(2,order.true_accomplished) def test_part_accomplished(self): order = Order(self._pos,XOPEN,3500,1,1,30) self.assertEqual(0,order.accomplished) self.assertEqual(0,order.accomplished2) self.assertEqual(0,order.transfered) order.part_accomplished(10) self.assertEqual(10,order.accomplished) self.assertEqual(10,order.accomplished2) self.assertEqual(10,order.transfered) def test_isdone(self): order = Order(self._pos,XOPEN,3500,1,1,30) order._state = ORDER_STATUS.ACCEPT self.assertEqual(False,order.done) order._state = ORDER_STATUS.REJECT self.assertEqual(True,order.done) order._state = 
ORDER_STATUS.CANCELLED self.assertEqual(True,order.done) order._state = ORDER_STATUS.PART_SUCCESSED self.assertEqual(True,order.done) order._state = ORDER_STATUS.SUCCESSED self.assertEqual(True,order.done) def test_on_reject(self): order = Order(self._pos,XOPEN,3500,1,1,30) self.assertEqual(False,order.done) order.on_reject() self.assertEqual(True,order.done) def test_on_progress(self): order = Order(self._pos,XOPEN,3500,2,2,30) order.approve(2,70000) self.assertEqual(0,order._accomplished) order.on_progress(1,3501) self.assertEqual(1,order._accomplished) self.assertEqual(1,len(order._details)) self.assertEqual(1,order._details[0].volume) self.assertEqual(3501,order._details[0].price) self.assertRaises(AssertionError,order.on_progress,2,3501) order.on_progress(1,3502) self.assertEqual(2,order._accomplished) self.assertEqual(2,len(order._details)) self.assertEqual(1,order._details[-1].volume) self.assertEqual(3502,order._details[-1].price) def test_on_done(self): order = Order(self._pos,XOPEN,3500,2,2,30) self.assertRaises(AssertionError,order.on_done,2) order.approve(2,70000) order.on_done(2) self.assertEqual(2,order._accomplished2) self.assertEqual(ORDER_STATUS.SUCCESSED,order._state) def test_on_done_cancelled(self): order2 = Order(self._pos,XOPEN,3500,2,2,30) order2.approve(2,70000) order2.on_done(1) self.assertEqual(1,order2._accomplished2) self.assertEqual(ORDER_STATUS.CANCELLED,order2._state) def test_on_done_part_success(self): order = Order(self._pos,XOPEN,3500,2,2,30) order.approve(2,70000) order.on_progress(1,3501) order.on_done(1) self.assertEqual(1,order._accomplished2) self.assertEqual(ORDER_STATUS.PART_SUCCESSED,order._state) def test_on_the_flying(self): order = Order(self._pos,XOPEN,3500,2,2,30) order.approve(2,80000) self.assertEqual(2,order.volume_on_the_flying) order.on_done(1) self.assertEqual(1,order.volume_on_the_flying) class CloserTestCase(unittest.TestCase): def test_closer_func1(self): func1 = mock.Mock() closer = 
Closer(func1,LongCloserParameter1) closer.prepare([BaseObject()]) closer.check(BaseObject(),[BaseObject()]) self.assertTrue(True) def test_closer_func2(self): func2 = mock.Mock() closer = Closer(func2,LongCloserParameter1()) closer.prepare([BaseObject(),BaseObject()]) closer.check(BaseObject(),BaseObject(),[BaseObject()]) self.assertTrue(True) def test_closer_dump(self): contract = ci('m1501',1501,M) pos = Position(contract,LONG) pos2 = Position(contract,LONG) closer = Closer(pos.add_close_order,LongCloserParameter1()) closer.prepare([pos,pos2]) cs=dumps(closer) rcs = loads(cs) self.assertEqual("add_close_order",rcs.close_func_name) #print(cs) def test_id(self): contract = ci('m1501',1501,M) pos = Position(contract,LONG) closer = Closer(pos.add_close_order,LongCloserParameter1()) closer2 = Closer(pos.add_close_order,LongCloserParameter1()) self.assertEqual(closer._id+1,closer2._id) func1 = mock.Mock() closer3 = ContractCloser(func1,LongCloserParameter1) self.assertEqual(closer._id+2,closer3._id) func2 = mock.Mock() closer4 = GlobalCloser(func2,LongCloserParameter1) self.assertEqual(closer._id+3,closer4._id) def test_calc_target_price(self): contract = BaseObject(upperlimit_price=3000,lowerlimit_price=2500,price_tick=2) #direction=LONG self.assertEqual(3000,Order.calc_target_price(contract,LONG,2800,MAX_VALUE)) self.assertEqual(2500,Order.calc_target_price(contract,LONG,2800,-MAX_VALUE)) self.assertEqual(2860,Order.calc_target_price(contract,LONG,2800,30)) self.assertEqual(2740,Order.calc_target_price(contract,LONG,2800,-30)) self.assertEqual(3000,Order.calc_target_price(contract,LONG,2800,200)) self.assertEqual(2500,Order.calc_target_price(contract,LONG,2800,-200)) #direction=SHORT self.assertEqual(2500,Order.calc_target_price(contract,SHORT,2800,MAX_VALUE)) self.assertEqual(3000,Order.calc_target_price(contract,SHORT,2800,-MAX_VALUE)) self.assertEqual(2740,Order.calc_target_price(contract,SHORT,2800,30)) 
self.assertEqual(2860,Order.calc_target_price(contract,SHORT,2800,-30)) self.assertEqual(2500,Order.calc_target_price(contract,SHORT,2800,200)) self.assertEqual(3000,Order.calc_target_price(contract,SHORT,2800,-200)) class ContractCloserTestCase(unittest.TestCase): def setUp(self): self._pholder = BaseObject(get_volume_holding=lambda x:10) def test_closer_func(self): func1 = mock.Mock() contract = ci('m1501',1501,M) closer = ContractCloser(func1,LongCloserParameter1) closer.prepare(self._pholder,contract) closer.check(BaseObject(),BaseObject(),[BaseObject()]) closer.check(BaseObject(),BaseObject(),[BaseObject(),BaseObject()]) self.assertTrue(True) def test_closer_dump(self): contract = ci('m1501',1501,M) pos = Position(contract,LONG) closer = ContractCloser(pos.add_close_order,LongCloserParameter1()) closer.prepare(self._pholder,contract) cs=dumps(closer) rcs = loads(cs) self.assertEqual("add_close_order",rcs.close_func_name) #print(cs) class GlobalCloserTestCase(unittest.TestCase): def setUp(self): self._pholder = BaseObject(name2positions=[10,20]) def test_closer_func(self): func1 = mock.Mock() closer = GlobalCloser(func1,LongCloserParameter1) closer.prepare(self._pholder) closer.check(BaseObject(),BaseObject(),[BaseObject()]) closer.check(BaseObject(),BaseObject(),[BaseObject(),BaseObject()]) self.assertTrue(True) def test_closer_dump(self): func1 = mock.Mock() contract = ci('m1501',1501,M) pos = Position(contract,LONG) closer = GlobalCloser(pos.add_close_order,LongCloserParameter1()) #closer = GlobalCloser(func1,LongCloserParameter()) closer.prepare(self._pholder) cs=dumps(closer) rcs = loads(cs) self.assertEqual("add_close_order",rcs.close_func_name) #print(closer.close_func) class PositionTestCase(unittest.TestCase): def setUp(self): self._contract = ci('m1501',1501,M) self._contract._trading_day = 20141232 self._pos = Position(self._contract,LONG) cc = mock.Mock() cc.__name__ = "TEST_MOCK" closer1 = Closer(cc,None) closer2 = ContractCloser(cc,None) closer3 = 
GlobalCloser(cc,None) self._popen = POPEN(self._contract,3650,LONG,[closer1,closer2,closer3],30) self._popen.unit = self._popen.planned = 1 self._popen.direction = LONG self._open_order = self._pos.add_open_order(self._popen) self._open_order._approved = 1 self._open_order._trade_info=BaseObject(trading_day = 20100101) self._pclose = PCLOSE(self._pos,base_price=3650,volume=2,extra_hops=30) self._pclose.unit = self._pclose.planned = 1 def test_open_direction(self): pos = Position(self._contract,LONG) popen = POPEN(self._contract,3650,SHORT,[1,2,3],30) popen.unit = popen.planned = 1 self.assertRaises(AssertionError,pos.add_open_order,popen) #方向不一致 def test_open_orders(self): order = self._open_order self.assertEqual(self._pos,order.position) self.assertEqual(XOPEN,order.atype) self.assertEqual(self._pos._direction,order.direction) self.assertEqual(self._popen.base_price,order.base_price) self.assertEqual(self._popen.unit,order.unit) self.assertEqual(self._popen.planned,order.planned) self.assertEqual(self._popen.extra_hops,order.extra_hops) #不能有第二个open_order self.assertRaises(AssertionError,self._pos.add_open_order,self._popen) open_order = self._pos.open_order self.assertEqual(order,open_order) def test_close_order(self): close_order = self._pos.add_close_order(self._pclose) self.assertEqual(XCLOSE,close_order.atype) close_order._approved = 1 self.assertEqual(1,len(self._pos._close_orders)) self.assertRaises(AssertionError,self._pos.add_close_order,self._pclose) #超过持仓 self.assertEqual(1,len(self._pos._close_orders)) self.assertEqual(close_order,self._pos._close_orders[-1]) close_order._state = ORDER_STATUS.SUCCESSED close_order2 = self._pos.add_close_order(self._pclose) #未超过持仓,因为成功后,close_order.accomplished2=0 self.assertEqual(2,len(self._pos._close_orders)) self.assertEqual(close_order2,self._pos._close_orders[-1]) close_order._accomplished2 = 1 self.assertRaises(AssertionError,self._pos.add_close_order,self._pclose) #超过持仓 def test_close_order_today(self): 
self._contract._trading_day = self._open_order.trade_info.trading_day close_order = self._pos.add_close_order(self._pclose) self.assertEqual(XCLOSE_TODAY,close_order.atype) def test_check_closed(self): #check_closed self.assertFalse(self._pos.check_closed()) self._open_order._state = ORDER_STATUS.SUCCESSED self.assertTrue(self._pos.check_closed()) self._pos._closed = False self._open_order._accomplished2 = 1 self.assertFalse(self._pos.check_closed()) #closed close_order = self._pos.add_close_order(self._pclose) self.assertFalse(self._pos.check_closed()) close_order._approved = 1 self.assertFalse(self._pos.check_closed()) close_order._accomplished2 = 1 self.assertTrue(self._pos.check_closed()) def test_check_closed2(self): #check_closed self.assertFalse(self._pos.check_closed()) self._open_order._state = ORDER_STATUS.SUCCESSED self.assertTrue(self._pos.check_closed()) self._pos._closed = False self._open_order._approved = self._open_order._accomplished2 = 2 self.assertFalse(self._pos.check_closed()) #closed close_order = self._pos.add_close_order(self._pclose) self.assertFalse(self._pos.check_closed()) close_order._approved = 1 self.assertFalse(self._pos.check_closed()) close_order._accomplished2 = 1 self.assertFalse(self._pos.check_closed()) close_order2 = self._pos.add_close_order(self._pclose) self.assertFalse(self._pos.check_closed()) close_order2._accomplished2 = 1 self.assertTrue(self._pos.check_closed()) def test_get_on_the_flying(self): #通路测试 self.assertEqual(1,self._pos._get_on_the_flying(self._pos._open_orders)) self._pos._open_orders.append(self._pos._open_orders[0]) self.assertEqual(2,self._pos._get_on_the_flying(self._pos._open_orders)) def test_get_approved(self): #通路测试 self.assertEqual(1,self._pos._get_approved(self._pos._open_orders)) self._pos._open_orders.append(self._pos._open_orders[0]) self.assertEqual(2,self._pos._get_approved(self._pos._open_orders)) def test_get_accomplished2(self): #通路测试 
self.assertEqual(0,self._pos._get_accomplished2(self._pos._open_orders)) self._pos._open_orders.append(self._pos._open_orders[0]) self.assertEqual(0,self._pos._get_accomplished2(self._pos._open_orders)) self._open_order._accomplished2 = 1 self.assertEqual(2,self._pos._get_accomplished2(self._pos._open_orders)) def test_get_holding_volume(self): #通路测试 self.assertEqual(1,self._pos.volume_holding) def test_get_approved_volume(self): #通路测试 self.assertEqual(1,self._pos.volume_approved) def test_get_accomplished2_volume(self): #通路测试 self.assertEqual(0,self._pos.volume_accomplished2) self._open_order._accomplished2 = 1 self.assertEqual(1,self._pos.volume_accomplished2) def test_dumps_loads(self): stxt = dumps(self._pos) #print(stxt) pos = loads(stxt) self.assertEqual(self._pos._contract.name,pos.contract.name) self.assertEqual(type(self._pos._contract.ctype),type(pos.contract.ctype)) self.assertEqual(len(self._pos._closers),len(pos.closers)) sst = [type(c) for c in self._pos._closers] for s in pos.closers: self.assertTrue(type(s) in sst) self.assertEqual(self._pos._open_orders[0].unit,pos.open_orders[0].unit) self.assertEqual(self._pos._open_orders[0].planned,pos.open_orders[0].planned) self.assertEqual(self._pos._open_orders[0].approved,pos.open_orders[0].approved) self.assertEqual(self._pos._open_orders[0].base_price,pos.open_orders[0].base_price) self.assertEqual(len(self._pos._close_orders),len(pos.close_orders)) class PosInfoTestCase(unittest.TestCase):#被间接测试 def test_pos_info(self): pinfo = PosInfo("ctest",10,5) self.assertEqual(10,pinfo._num_long) self.assertEqual(5,pinfo._num_short) self.assertEqual(15,pinfo.total) self.assertEqual(5,pinfo.net) pinfo.add_short(11) self.assertEqual(16,pinfo._num_short) pinfo.add_long(11) self.assertEqual(21,pinfo._num_long) self.assertEqual(37,pinfo.total) self.assertEqual(5,pinfo.net) def test_add(self): pinfo = PosInfo("ctest",10,5) pother = PosInfo("ctest",1,3) pinfo.add(pother) self.assertEqual(11,pinfo._num_long) 
self.assertEqual(8,pinfo._num_short) self.assertEqual(19,pinfo.total) self.assertEqual(3,pinfo.net) pother2 = PosInfo("ctest2",0,1) self.assertRaises(AssertionError,pinfo.add(pother)) class PositionHolderTestCase(unittest.TestCase): def setUp(self): self._contract_m = ci('m1501',1501,M) self._contract_p = ci('p1501',1501,P) self._contract_m._trading_day = 20141232 self._contract_p._trading_day = 20141232 pos1 = Position(self._contract_m,LONG) pos2 = Position(self._contract_m,LONG) pos3 = Position(self._contract_p,LONG) cc = mock.Mock() cc.__name__ = "TEST_MOCK" closer1 = Closer(cc,LongCloserParameter1()) closer2 = ContractCloser(cc,None) closer3 = GlobalCloser(cc,None) self._poses = [pos1,pos2,pos3] popen1 = POPEN(self._contract_m,3650,LONG,[closer1,closer2,closer3],30) popen1.unit = popen1.planned = 1 popen1.direction = LONG open_order1 = pos1.add_open_order(popen1) open_order1._approved = 1 open_order1._trade_info=BaseObject(trading_day = 20100101) closer1_2 = Closer(cc,LongCloserParameter1()) closer2_2 = ContractCloser(cc,None) closer3_2 = GlobalCloser(cc,None) popen2 = POPEN(self._contract_m,3650,LONG,[closer1_2,closer2_2,closer3_2],30) popen2.unit = popen2.planned = 1 popen2.direction = LONG open_order2 = pos2.add_open_order(popen2) open_order2._approved = 2 open_order2._trade_info=BaseObject(trading_day = 20100101) closer1_3 = Closer(cc,LongCloserParameter1()) closer2_3= ContractCloser(cc,None) closer3_3 = GlobalCloser(cc,None) popen3 = POPEN(self._contract_p,3650,SHORT,[closer1_3,closer2_3,closer3_3],30) popen3.unit = popen3.planned = 1 popen3.direction = LONG open_order3 = pos3.add_open_order(popen3) open_order3._approved = 1 open_order3._trade_info=BaseObject(trading_day = 20100101) #self.pclose = PCLOSE(self.pos1,base_price=3650,volume=2,extra_hops=30) #self.pclose.wanted = self.pclose.planned = 1 def test_base(self): pholder = PositionHolder() self.assertEqual(1,pholder._max_position_id) self.assertEqual(2,pholder.inc_position_id()) 
self.assertEqual(2,pholder._max_position_id) def test_check_consistency(self): pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:]) self.assertTrue(pholder.check_consistency()) pos4 = Position(self._contract_m,LONG) #测试 active_positions中position也必须在name2position中出现 pholder._active_positions.append(pos4) self.assertRaises(AssertionError,pholder.check_consistency) del pholder._active_positions[3] self.assertTrue(pholder.check_consistency()) # 测试name2position中的position必须在active_position中出现 pholder._name2positions[pos4._contract.name].append(pos4) self.assertRaises(AssertionError,pholder.check_consistency) def test_add_positions(self): """ 测试 add_positions/_add_position/_fill_closer 为半通道测试 """ pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) self.assertEqual(1,len(pholder._active_positions)) self.assertEqual(1,len(pholder._name2positions)) self.assertEqual(self._poses[0],pholder._active_positions[0]) self.assertEqual(self._poses[0],pholder._name2positions[self._poses[0]._contract.name][0]) pholder.add_positions(self._poses[1:]) self.assertEqual(3,len(pholder._active_positions)) self.assertEqual(2,len(pholder._name2positions)) self.assertEqual(self._poses[2],pholder._active_positions[2]) self.assertEqual([self._poses[0],self._poses[1]],pholder._name2positions[self._poses[0]._contract.name]) self.assertEqual(self._poses[2],pholder._name2positions[self._poses[2]._contract.name][0]) def test_add_positions_failed(self): pholder = PositionHolder() self.assertRaises(AssertionError,pholder.add_positions,self._poses[0:2]) #同个Contract不能同时发出多个Position def test_on_order_done(self): pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:]) self.assertEqual(3,len(pholder._active_positions)) self.assertEqual(2,len(pholder._name2positions)) self.assertEqual([self._poses[0],self._poses[1]],pholder._name2positions[self._poses[0]._contract.name]) 
self._poses[0]._open_orders[0]._approved=0 self.assertTrue(self._poses[0].check_closed()) pholder.on_order_done(self._poses[0]) self.assertEqual(2,len(pholder._active_positions)) self.assertEqual(2,len(pholder._name2positions)) self.assertEqual([self._poses[1]],pholder._name2positions[self._poses[0]._contract.name]) self.assertEqual(1,len(pholder._closed_positions)) pholder.to_dict() #closed position self.assertTrue(True) #顺利到达 def test_get_by_name(self): pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:]) poses1 = pholder.get_positions_by_name("m1501") poses2 = pholder.get_positions_by_name("p1501") self.assertEqual(2,len(poses1)) self.assertEqual(1,len(poses2)) self.assertEqual([self._poses[0],self._poses[1]],poses1) self.assertEqual([self._poses[2]],poses2) def test_get_holding_volume(self): #通道测试 pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:]) hvs = pholder.get_volume_holding("m1501") self.assertEqual(3,hvs) hvs2 = pholder.get_volume_holding("p1501") self.assertEqual(1,hvs2) def test_get_approved_volume(self): #通道测试 pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:]) hvs = pholder.get_volume_approved("m1501") self.assertEqual(3,hvs) hvs2 = pholder.get_volume_approved("p1501") self.assertEqual(1,hvs2) self._poses[0]._open_orders[0]._approved = 0 self._poses[1]._open_orders[0]._approved = 1 hvs3 = pholder.get_volume_approved("m1501") self.assertEqual(1,hvs3) def test_get_holding_info(self): #通道测试 pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:2]) pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:2]) #pholder.add_positions(self.poses[0:1]) hvs = pholder.get_volume_holding("m1501") self.assertEqual(6,hvs) pinfo = pholder.get_holding_info("m1501") self.assertEqual(6,pinfo.total) self.assertEqual(6,pinfo.net) 
self.assertEqual(6,pinfo.num_long) self.assertEqual(0,pinfo.num_short) #这个只是了解了内部实现逻辑之后的临时设定, 在pos内部是不和谐的,但测试够用 self._poses[1]._direction = SHORT pinfo = pholder.get_holding_info("m1501") self.assertEqual(6,pinfo.total) self.assertEqual(-2,pinfo.net) self.assertEqual(2,pinfo.num_long) self.assertEqual(4,pinfo.num_short) self._poses[1]._open_orders[0]._approved = 1 pinfo = pholder.get_holding_info("m1501") self.assertEqual(4,pinfo.total) self.assertEqual(0,pinfo.net) self.assertEqual(2,pinfo.num_long) self.assertEqual(2,pinfo.num_short) def test_get_approved_info(self): #通道测试 pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:2]) pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:2]) #pholder.add_positions(self.poses[0:1]) hvs = pholder.get_volume_approved("m1501") self.assertEqual(6,hvs) pinfo = pholder.get_approved_info("m1501") self.assertEqual(6,pinfo.total) self.assertEqual(6,pinfo.net) self.assertEqual(6,pinfo.num_long) self.assertEqual(0,pinfo.num_short) #这个只是了解了内部实现逻辑之后的临时设定, 在pos内部是不和谐的,但测试够用 self._poses[1]._direction = SHORT pinfo = pholder.get_approved_info("m1501") self.assertEqual(6,pinfo.total) self.assertEqual(-2,pinfo.net) self.assertEqual(2,pinfo.num_long) self.assertEqual(4,pinfo.num_short) def test_to_dict(self): #通道测试 pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:]) sdict = pholder.to_dict() self.assertTrue(True) stxt = dumps(pholder) self.assertTrue(True) #print(stxt) ##测试closed_position self._poses[0]._open_orders[0]._approved = 0 pholder.on_order_done(self._poses[0]) self.assertEqual(1,len(pholder._closed_positions)) sdict = pholder.to_dict() stxt = dumps(pholder) self.assertTrue(True) def test_serialize(self): pholder = PositionHolder() pholder.add_positions(self._poses[0:1]) pholder.add_positions(self._poses[1:]) #print(list(pholder.active_positions[1].closers)[0].positions) stxt = dumps(pholder) #print(stxt) 
pholder2 = loads(stxt) self.assertEqual(3,len(pholder2.active_positions)) self.assertEqual(2,len(pholder2.name2positions)) self.assertEqual(self._poses[0]._contract.name,pholder2.active_positions[0].contract.name) #print(id(self.poses[0]),id(pholder2.active_positions[0])) self.assertEqual(pholder2.get_positions_by_name("m1501")[0],pholder2.active_positions[0]) self.assertEqual(pholder2.get_positions_by_name("m1501")[1],pholder2.active_positions[1]) self.assertEqual(pholder2.get_positions_by_name("p1501")[0],pholder2.active_positions[2]) for closer in pholder2.active_positions[0].closers: if isinstance(closer,Closer): self.assertEqual(pholder2.active_positions[0],closer._positions[0]) self.assertEqual(1,len(closer._positions)) else: self.assertEqual(pholder2,closer._position_holder) for closer in pholder2.active_positions[1].closers: if isinstance(closer,Closer): self.assertEqual(pholder2.active_positions[1],closer._positions[0]) self.assertEqual(pholder2.active_positions[2],closer._positions[1]) self.assertEqual(2,len(closer._positions)) else: self.assertEqual(pholder2,closer._position_holder) #测试 close_positions通道 self.assertEqual(0,len(pholder._closed_positions)) self._poses[0]._open_orders[0]._approved = 0 pholder.on_order_done(self._poses[0]) self.assertEqual(1,len(pholder._closed_positions)) stxt2 = dumps(pholder) #print(stxt2) pholder3 = loads(stxt2) #self.assertEqual(1,len(pholder3.closed_positions)) #self.assertEqual(pholder._closed_positions[0].contract.name,pholder3.closed_positions[0].contract.name) #self.assertEqual(pholder._closed_positions[0].open_orders[0].base_price,pholder3.closed_positions[0].open_orders[0].base_price) #elf.assertEqual(pholder._closed_positions[0].open_orders[0].approved,pholder3.closed_positions[0].open_orders[0].approved) if __name__ == '__main__': logging.basicConfig(filename="%s/test_position.log" % (TEST_PATH,),level=logging.DEBUG,format='%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s') unittest.main()
mit
ltilve/ChromiumGStreamerBackend
tools/telemetry/third_party/gsutilz/gslib/command_runner.py
12
15982
# -*- coding: utf-8 -*- # Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Class that runs a named gsutil command.""" from __future__ import absolute_import import difflib import logging import os import pkgutil import sys import textwrap import time import boto from boto.storage_uri import BucketStorageUri import gslib from gslib.cloud_api_delegator import CloudApiDelegator from gslib.command import Command from gslib.command import CreateGsutilLogger from gslib.command import GetFailureCount from gslib.command import OLD_ALIAS_MAP from gslib.command import ShutDownGsutil import gslib.commands from gslib.cs_api_map import ApiSelector from gslib.cs_api_map import GsutilApiClassMapFactory from gslib.cs_api_map import GsutilApiMapFactory from gslib.exception import CommandException from gslib.gcs_json_api import GcsJsonApi from gslib.no_op_credentials import NoOpCredentials from gslib.tab_complete import MakeCompleter from gslib.util import CompareVersions from gslib.util import GetGsutilVersionModifiedTime from gslib.util import GSUTIL_PUB_TARBALL from gslib.util import IsRunningInteractively from gslib.util import LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE from gslib.util import LookUpGsutilVersion from gslib.util import MultiprocessingIsAvailable from gslib.util import RELEASE_NOTES_URL from gslib.util import SECONDS_PER_DAY from gslib.util import UTF8 def HandleArgCoding(args): """Handles coding of command-line args. 
Args: args: array of command-line args. Returns: array of command-line args. Raises: CommandException: if errors encountered. """ # Python passes arguments from the command line as byte strings. To # correctly interpret them, we decode ones other than -h and -p args (which # will be passed as headers, and thus per HTTP spec should not be encoded) as # utf-8. The exception is x-goog-meta-* headers, which are allowed to contain # non-ASCII content (and hence, should be decoded), per # https://developers.google.com/storage/docs/gsutil/addlhelp/WorkingWithObjectMetadata processing_header = False for i in range(len(args)): arg = args[i] # Commands like mv can run this function twice; don't decode twice. try: decoded = arg if isinstance(arg, unicode) else arg.decode(UTF8) except UnicodeDecodeError: raise CommandException('\n'.join(textwrap.wrap( 'Invalid encoding for argument (%s). Arguments must be decodable as ' 'Unicode. NOTE: the argument printed above replaces the problematic ' 'characters with a hex-encoded printable representation. For more ' 'details (including how to convert to a gsutil-compatible encoding) ' 'see `gsutil help encoding`.' % repr(arg)))) if processing_header: if arg.lower().startswith('x-goog-meta'): args[i] = decoded else: try: # Try to encode as ASCII to check for invalid header values (which # can't be sent over HTTP). decoded.encode('ascii') except UnicodeEncodeError: # Raise the CommandException using the decoded value because # _OutputAndExit function re-encodes at the end. 
raise CommandException( 'Invalid non-ASCII header value (%s).\nOnly ASCII characters are ' 'allowed in headers other than x-goog-meta- headers' % decoded) else: args[i] = decoded processing_header = (arg in ('-h', '-p')) return args class CommandRunner(object): """Runs gsutil commands and does some top-level argument handling.""" def __init__(self, bucket_storage_uri_class=BucketStorageUri, gsutil_api_class_map_factory=GsutilApiClassMapFactory, command_map=None): """Instantiates a CommandRunner. Args: bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking. gsutil_api_class_map_factory: Creates map of cloud storage interfaces. Settable for testing/mocking. command_map: Map of command names to their implementations for testing/mocking. If not set, the map is built dynamically. """ self.bucket_storage_uri_class = bucket_storage_uri_class self.gsutil_api_class_map_factory = gsutil_api_class_map_factory if command_map: self.command_map = command_map else: self.command_map = self._LoadCommandMap() def _LoadCommandMap(self): """Returns dict mapping each command_name to implementing class.""" # Import all gslib.commands submodules. for _, module_name, _ in pkgutil.iter_modules(gslib.commands.__path__): __import__('gslib.commands.%s' % module_name) command_map = {} # Only include Command subclasses in the dict. for command in Command.__subclasses__(): command_map[command.command_spec.command_name] = command for command_name_aliases in command.command_spec.command_name_aliases: command_map[command_name_aliases] = command return command_map def _ConfigureCommandArgumentParserArguments( self, parser, arguments, gsutil_api): """Configures an argument parser with the given arguments. Args: parser: argparse parser object. arguments: array of CommandArgument objects. gsutil_api: gsutil Cloud API instance to use. 
Raises: RuntimeError: if argument is configured with unsupported completer """ for command_argument in arguments: action = parser.add_argument( *command_argument.args, **command_argument.kwargs) if command_argument.completer: action.completer = MakeCompleter(command_argument.completer, gsutil_api) def ConfigureCommandArgumentParsers(self, subparsers): """Configures argparse arguments and argcomplete completers for commands. Args: subparsers: argparse object that can be used to add parsers for subcommands (called just 'commands' in gsutil) """ # This should match the support map for the "ls" command. support_map = { 'gs': [ApiSelector.XML, ApiSelector.JSON], 's3': [ApiSelector.XML] } default_map = { 'gs': ApiSelector.JSON, 's3': ApiSelector.XML } gsutil_api_map = GsutilApiMapFactory.GetApiMap( self.gsutil_api_class_map_factory, support_map, default_map) logger = CreateGsutilLogger('tab_complete') gsutil_api = CloudApiDelegator( self.bucket_storage_uri_class, gsutil_api_map, logger, debug=0) for command in set(self.command_map.values()): command_parser = subparsers.add_parser( command.command_spec.command_name, add_help=False) if isinstance(command.command_spec.argparse_arguments, dict): subcommand_parsers = command_parser.add_subparsers() subcommand_argument_dict = command.command_spec.argparse_arguments for subcommand, arguments in subcommand_argument_dict.iteritems(): subcommand_parser = subcommand_parsers.add_parser( subcommand, add_help=False) self._ConfigureCommandArgumentParserArguments( subcommand_parser, arguments, gsutil_api) else: self._ConfigureCommandArgumentParserArguments( command_parser, command.command_spec.argparse_arguments, gsutil_api) def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, skip_update_check=False, logging_filters=None, do_shutdown=True): """Runs the named command. Used by gsutil main, commands built atop other commands, and tests. 
Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. skip_update_check: Set to True to disable checking for gsutil updates. logging_filters: Optional list of logging.Filters to apply to this command's logger. do_shutdown: Stop all parallelism framework workers iff this is True. Raises: CommandException: if errors encountered. Returns: Return value(s) from Command that was run. """ if (not skip_update_check and self.MaybeCheckForAndOfferSoftwareUpdate(command_name, debug)): command_name = 'update' args = ['-n'] if not args: args = [] # Include api_version header in all commands. api_version = boto.config.get_value('GSUtil', 'default_api_version', '1') if not headers: headers = {} headers['x-goog-api-version'] = api_version if command_name not in self.command_map: close_matches = difflib.get_close_matches( command_name, self.command_map.keys(), n=1) if close_matches: # Instead of suggesting a deprecated command alias, suggest the new # name for that command. translated_command_name = ( OLD_ALIAS_MAP.get(close_matches[0], close_matches)[0]) print >> sys.stderr, 'Did you mean this?' print >> sys.stderr, '\t%s' % translated_command_name elif command_name == 'update' and gslib.IS_PACKAGE_INSTALL: sys.stderr.write( 'Update command is not supported for package installs; ' 'please instead update using your package manager.') raise CommandException('Invalid command "%s".' 
% command_name) if '--help' in args: new_args = [command_name] original_command_class = self.command_map[command_name] subcommands = original_command_class.help_spec.subcommand_help_text.keys() for arg in args: if arg in subcommands: new_args.append(arg) break # Take the first match and throw away the rest. args = new_args command_name = 'help' args = HandleArgCoding(args) command_class = self.command_map[command_name] command_inst = command_class( self, args, headers, debug, parallel_operations, self.bucket_storage_uri_class, self.gsutil_api_class_map_factory, test_method, logging_filters, command_alias_used=command_name) return_code = command_inst.RunCommand() if MultiprocessingIsAvailable()[0] and do_shutdown: ShutDownGsutil() if GetFailureCount() > 0: return_code = 1 return return_code def MaybeCheckForAndOfferSoftwareUpdate(self, command_name, debug): """Checks the last time we checked for an update and offers one if needed. Offer is made if the time since the last update check is longer than the configured threshold offers the user to update gsutil. Args: command_name: The name of the command being run. debug: Debug level to pass in to boto connection (range 0..3). Returns: True if the user decides to update. """ # Don't try to interact with user if: # - gsutil is not connected to a tty (e.g., if being run from cron); # - user is running gsutil -q # - user is running the config command (which could otherwise attempt to # check for an update for a user running behind a proxy, who has not yet # configured gsutil to go through the proxy; for such users we need the # first connection attempt to be made by the gsutil config command). # - user is running the version command (which gets run when using # gsutil -D, which would prevent users with proxy config problems from # sending us gsutil -D output). 
# - user is running the update command (which could otherwise cause an # additional note that an update is available when user is already trying # to perform an update); # - user specified gs_host (which could be a non-production different # service instance, in which case credentials won't work for checking # gsutil tarball). # - user is using a Cloud SDK install (which should only be updated via # gcloud components update) logger = logging.getLogger() gs_host = boto.config.get('Credentials', 'gs_host', None) if (not IsRunningInteractively() or command_name in ('config', 'update', 'ver', 'version') or not logger.isEnabledFor(logging.INFO) or gs_host or os.environ.get('CLOUDSDK_WRAPPER') == '1'): return False software_update_check_period = boto.config.getint( 'GSUtil', 'software_update_check_period', 30) # Setting software_update_check_period to 0 means periodic software # update checking is disabled. if software_update_check_period == 0: return False cur_ts = int(time.time()) if not os.path.isfile(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE): # Set last_checked_ts from date of VERSION file, so if the user installed # an old copy of gsutil it will get noticed (and an update offered) the # first time they try to run it. last_checked_ts = GetGsutilVersionModifiedTime() with open(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE, 'w') as f: f.write(str(last_checked_ts)) else: try: with open(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE, 'r') as f: last_checked_ts = int(f.readline()) except (TypeError, ValueError): return False if (cur_ts - last_checked_ts > software_update_check_period * SECONDS_PER_DAY): # Create a credential-less gsutil API to check for the public # update tarball. 
gsutil_api = GcsJsonApi(self.bucket_storage_uri_class, logger, credentials=NoOpCredentials(), debug=debug) cur_ver = LookUpGsutilVersion(gsutil_api, GSUTIL_PUB_TARBALL) with open(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE, 'w') as f: f.write(str(cur_ts)) (g, m) = CompareVersions(cur_ver, gslib.VERSION) if m: print '\n'.join(textwrap.wrap( 'A newer version of gsutil (%s) is available than the version you ' 'are running (%s). NOTE: This is a major new version, so it is ' 'strongly recommended that you review the release note details at ' '%s before updating to this version, especially if you use gsutil ' 'in scripts.' % (cur_ver, gslib.VERSION, RELEASE_NOTES_URL))) if gslib.IS_PACKAGE_INSTALL: return False print answer = raw_input('Would you like to update [y/N]? ') return answer and answer.lower()[0] == 'y' elif g: print '\n'.join(textwrap.wrap( 'A newer version of gsutil (%s) is available than the version you ' 'are running (%s). A detailed log of gsutil release changes is ' 'available at %s if you would like to read them before updating.' % (cur_ver, gslib.VERSION, RELEASE_NOTES_URL))) if gslib.IS_PACKAGE_INSTALL: return False print answer = raw_input('Would you like to update [Y/n]? ') return not answer or answer.lower()[0] != 'n' return False
bsd-3-clause
Jeff-Tian/mybnb
Python27/Lib/compiler/visitor.py
20
4009
from compiler import ast # XXX should probably rename ASTVisitor to ASTWalker # XXX can it be made even more generic? class ASTVisitor: """Performs a depth-first walk of the AST The ASTVisitor will walk the AST, performing either a preorder or postorder traversal depending on which method is called. methods: preorder(tree, visitor) postorder(tree, visitor) tree: an instance of ast.Node visitor: an instance with visitXXX methods The ASTVisitor is responsible for walking over the tree in the correct order. For each node, it checks the visitor argument for a method named 'visitNodeType' where NodeType is the name of the node's class, e.g. Class. If the method exists, it is called with the node as its sole argument. The visitor method for a particular node type can control how child nodes are visited during a preorder walk. (It can't control the order during a postorder walk, because it is called _after_ the walk has occurred.) The ASTVisitor modifies the visitor argument by adding a visit method to the visitor; this method can be used to visit a child node of arbitrary type. """ VERBOSE = 0 def __init__(self): self.node = None self._cache = {} def default(self, node, *args): for child in node.getChildNodes(): self.dispatch(child, *args) def dispatch(self, node, *args): self.node = node klass = node.__class__ meth = self._cache.get(klass, None) if meth is None: className = klass.__name__ meth = getattr(self.visitor, 'visit' + className, self.default) self._cache[klass] = meth ## if self.VERBOSE > 0: ## className = klass.__name__ ## if self.VERBOSE == 1: ## if meth == 0: ## print "dispatch", className ## else: ## print "dispatch", className, (meth and meth.__name__ or '') return meth(node, *args) def preorder(self, tree, visitor, *args): """Do preorder walk of tree using visitor""" self.visitor = visitor visitor.visit = self.dispatch self.dispatch(tree, *args) # XXX *args make sense? 
class ExampleASTVisitor(ASTVisitor): """Prints examples of the nodes that aren't visited This visitor-driver is only useful for development, when it's helpful to develop a visitor incrementally, and get feedback on what you still have to do. """ examples = {} def dispatch(self, node, *args): self.node = node meth = self._cache.get(node.__class__, None) className = node.__class__.__name__ if meth is None: meth = getattr(self.visitor, 'visit' + className, 0) self._cache[node.__class__] = meth if self.VERBOSE > 1: print "dispatch", className, (meth and meth.__name__ or '') if meth: meth(node, *args) elif self.VERBOSE > 0: klass = node.__class__ if klass not in self.examples: self.examples[klass] = klass print print self.visitor print klass for attr in dir(node): if attr[0] != '_': print "\t", "%-12.12s" % attr, getattr(node, attr) print return self.default(node, *args) # XXX this is an API change _walker = ASTVisitor def walk(tree, visitor, walker=None, verbose=None): if walker is None: walker = _walker() if verbose is not None: walker.VERBOSE = verbose walker.preorder(tree, visitor) return walker.visitor def dumpNode(node): print node.__class__ for attr in dir(node): if attr[0] != '_': print "\t", "%-10.10s" % attr, getattr(node, attr)
apache-2.0
zcmarkyoung/node-gyp
gyp/pylib/gyp/generator/gypd.py
912
3325
# Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """gypd output module This module produces gyp input as its output. Output files are given the .gypd extension to avoid overwriting the .gyp files that they are generated from. Internal references to .gyp files (such as those found in "dependencies" sections) are not adjusted to point to .gypd files instead; unlike other paths, which are relative to the .gyp or .gypd file, such paths are relative to the directory from which gyp was run to create the .gypd file. This generator module is intended to be a sample and a debugging aid, hence the "d" for "debug" in .gypd. It is useful to inspect the results of the various merges, expansions, and conditional evaluations performed by gyp and to see a representation of what would be fed to a generator module. It's not advisable to rename .gypd files produced by this module to .gyp, because they will have all merges, expansions, and evaluations already performed and the relevant constructs not present in the output; paths to dependencies may be wrong; and various sections that do not belong in .gyp files such as such as "included_files" and "*_excluded" will be present. Output will also be stripped of comments. This is not intended to be a general-purpose gyp pretty-printer; for that, you probably just want to run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip comments but won't do all of the other things done to this module's output. The specific formatting of the output generated by this module is subject to change. """ import gyp.common import errno import os import pprint # These variables should just be spit back out as variable references. 
_generator_identity_variables = [ 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'INTERMEDIATE_DIR', 'PRODUCT_DIR', 'RULE_INPUT_ROOT', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'RULE_INPUT_NAME', 'RULE_INPUT_PATH', 'SHARED_INTERMEDIATE_DIR', ] # gypd doesn't define a default value for OS like many other generator # modules. Specify "-D OS=whatever" on the command line to provide a value. generator_default_variables = { } # gypd supports multiple toolsets generator_supports_multiple_toolsets = True # TODO(mark): This always uses <, which isn't right. The input module should # notify the generator to tell it which phase it is operating in, and this # module should use < for the early phase and then switch to > for the late # phase. Bonus points for carrying @ back into the output too. for v in _generator_identity_variables: generator_default_variables[v] = '<(%s)' % v def GenerateOutput(target_list, target_dicts, data, params): output_files = {} for qualified_target in target_list: [input_file, target] = \ gyp.common.ParseQualifiedTarget(qualified_target)[0:2] if input_file[-4:] != '.gyp': continue input_file_stem = input_file[:-4] output_file = input_file_stem + params['options'].suffix + '.gypd' if not output_file in output_files: output_files[output_file] = input_file for output_file, input_file in output_files.iteritems(): output = open(output_file, 'w') pprint.pprint(data[input_file], output) output.close()
mit
mosaic-cloud/mosaic-distribution-dependencies
dependencies/nodejs/0.8.22/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py
2736
6387
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import gyp.common import gyp.easy_xml as easy_xml #------------------------------------------------------------------------------ class Tool(object): """Visual Studio tool.""" def __init__(self, name, attrs=None): """Initializes the tool. Args: name: Tool name. attrs: Dict of tool attributes; may be None. """ self._attrs = attrs or {} self._attrs['Name'] = name def _GetSpecification(self): """Creates an element for the tool. Returns: A new xml.dom.Element for the tool. """ return ['Tool', self._attrs] class Filter(object): """Visual Studio filter - that is, a virtual folder.""" def __init__(self, name, contents=None): """Initializes the folder. Args: name: Filter (folder) name. contents: List of filenames and/or Filter objects contained. """ self.name = name self.contents = list(contents or []) #------------------------------------------------------------------------------ class Writer(object): """Visual Studio XML project writer.""" def __init__(self, project_path, version, name, guid=None, platforms=None): """Initializes the project. Args: project_path: Path to the project file. version: Format version to emit. name: Name of the project. guid: GUID to use for project, if not None. platforms: Array of string, the supported platforms. If null, ['Win32'] """ self.project_path = project_path self.version = version self.name = name self.guid = guid # Default to Win32 for platforms. if not platforms: platforms = ['Win32'] # Initialize the specifications of the various sections. self.platform_section = ['Platforms'] for platform in platforms: self.platform_section.append(['Platform', {'Name': platform}]) self.tool_files_section = ['ToolFiles'] self.configurations_section = ['Configurations'] self.files_section = ['Files'] # Keep a dict keyed on filename to speed up access. 
self.files_dict = dict() def AddToolFile(self, path): """Adds a tool file to the project. Args: path: Relative path from project to tool file. """ self.tool_files_section.append(['ToolFile', {'RelativePath': path}]) def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools): """Returns the specification for a configuration. Args: config_type: Type of configuration node. config_name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Returns: """ # Handle defaults if not attrs: attrs = {} if not tools: tools = [] # Add configuration node and its attributes node_attrs = attrs.copy() node_attrs['Name'] = config_name specification = [config_type, node_attrs] # Add tool nodes and their attributes if tools: for t in tools: if isinstance(t, Tool): specification.append(t._GetSpecification()) else: specification.append(Tool(t)._GetSpecification()) return specification def AddConfig(self, name, attrs=None, tools=None): """Adds a configuration to the project. Args: name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. """ spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools) self.configurations_section.append(spec) def _AddFilesToNode(self, parent, files): """Adds files and/or filters to the parent node. Args: parent: Destination node files: A list of Filter objects and/or relative paths to files. Will call itself recursively, if the files list contains Filter objects. """ for f in files: if isinstance(f, Filter): node = ['Filter', {'Name': f.name}] self._AddFilesToNode(node, f.contents) else: node = ['File', {'RelativePath': f}] self.files_dict[f] = node parent.append(node) def AddFiles(self, files): """Adds files to the project. Args: files: A list of Filter objects and/or relative paths to files. This makes a copy of the file/filter tree at the time of this call. 
If you later add files to a Filter object which was passed into a previous call to AddFiles(), it will not be reflected in this project. """ self._AddFilesToNode(self.files_section, files) # TODO(rspangler) This also doesn't handle adding files to an existing # filter. That is, it doesn't merge the trees. def AddFileConfig(self, path, config, attrs=None, tools=None): """Adds a configuration to a file. Args: path: Relative path to the file. config: Name of configuration to add. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Raises: ValueError: Relative path does not match any file added via AddFiles(). """ # Find the file node with the right relative path parent = self.files_dict.get(path) if not parent: raise ValueError('AddFileConfig: file "%s" not in project.' % path) # Add the config to the file node spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs, tools) parent.append(spec) def WriteIfChanged(self): """Writes the project file.""" # First create XML content definition content = [ 'VisualStudioProject', {'ProjectType': 'Visual C++', 'Version': self.version.ProjectVersion(), 'Name': self.name, 'ProjectGUID': self.guid, 'RootNamespace': self.name, 'Keyword': 'Win32Proj' }, self.platform_section, self.tool_files_section, self.configurations_section, ['References'], # empty section self.files_section, ['Globals'] # empty section ] easy_xml.WriteXmlIfChanged(content, self.project_path, encoding="Windows-1252")
apache-2.0
NL66278/OCB
openerp/tools/lru.py
204
2946
# -*- coding: utf-8 -*- # taken from http://code.activestate.com/recipes/252524-length-limited-o1-lru-cache-implementation/ import threading from func import synchronized __all__ = ['LRU'] class LRUNode(object): __slots__ = ['prev', 'next', 'me'] def __init__(self, prev, me): self.prev = prev self.me = me self.next = None class LRU(object): """ Implementation of a length-limited O(1) LRU queue. Built for and used by PyPE: http://pype.sourceforge.net Copyright 2003 Josiah Carlson. """ def __init__(self, count, pairs=[]): self._lock = threading.RLock() self.count = max(count, 1) self.d = {} self.first = None self.last = None for key, value in pairs: self[key] = value @synchronized() def __contains__(self, obj): return obj in self.d @synchronized() def __getitem__(self, obj): a = self.d[obj].me self[a[0]] = a[1] return a[1] @synchronized() def __setitem__(self, obj, val): if obj in self.d: del self[obj] nobj = LRUNode(self.last, (obj, val)) if self.first is None: self.first = nobj if self.last: self.last.next = nobj self.last = nobj self.d[obj] = nobj if len(self.d) > self.count: if self.first == self.last: self.first = None self.last = None return a = self.first a.next.prev = None self.first = a.next a.next = None del self.d[a.me[0]] del a @synchronized() def __delitem__(self, obj): nobj = self.d[obj] if nobj.prev: nobj.prev.next = nobj.next else: self.first = nobj.next if nobj.next: nobj.next.prev = nobj.prev else: self.last = nobj.prev del self.d[obj] @synchronized() def __iter__(self): cur = self.first while cur is not None: cur2 = cur.next yield cur.me[1] cur = cur2 @synchronized() def __len__(self): return len(self.d) @synchronized() def iteritems(self): cur = self.first while cur is not None: cur2 = cur.next yield cur.me cur = cur2 @synchronized() def iterkeys(self): return iter(self.d) @synchronized() def itervalues(self): for i,j in self.iteritems(): yield j @synchronized() def keys(self): return self.d.keys() @synchronized() def pop(self,key): v=self[key] 
del self[key] return v @synchronized() def clear(self): self.d = {} self.first = None self.last = None # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
magvugr/AT
EntVirtual/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py
357
38875
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import absolute_import import os import re import sys if sys.version_info[0] < 3: from StringIO import StringIO string_types = basestring, text_type = unicode from types import FileType as file_type import __builtin__ as builtins import ConfigParser as configparser from ._backport import shutil from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, pathname2url, ContentTooShortError, splittype) def quote(s): if isinstance(s, unicode): s = s.encode('utf-8') return _quote(s) import urllib2 from urllib2 import (Request, urlopen, URLError, HTTPError, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPSHandler, HTTPHandler, HTTPRedirectHandler, build_opener) import httplib import xmlrpclib import Queue as queue from HTMLParser import HTMLParser import htmlentitydefs raw_input = raw_input from itertools import ifilter as filter from itertools import ifilterfalse as filterfalse _userprog = None def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" global _userprog if _userprog is None: import re _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: return match.group(1, 2) return None, host else: from io import StringIO string_types = str, text_type = str from io import TextIOWrapper as file_type import builtins import configparser import shutil from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, unquote, urlsplit, urlunsplit, splittype) from urllib.request import (urlopen, urlretrieve, Request, url2pathname, pathname2url, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPSHandler, HTTPHandler, HTTPRedirectHandler, build_opener) from urllib.error import HTTPError, URLError, ContentTooShortError import http.client 
as httplib import urllib.request as urllib2 import xmlrpc.client as xmlrpclib import queue from html.parser import HTMLParser import html.entities as htmlentitydefs raw_input = input from itertools import filterfalse filter = filter try: from ssl import match_hostname, CertificateError except ImportError: class CertificateError(ValueError): pass def _dnsname_to_pat(dn): pats = [] for frag in dn.split(r'.'): if frag == '*': # When '*' is a fragment by itself, it matches a non-empty # dotless fragment. pats.append('[^.]+') else: # Otherwise, '*' matches any dotless fragment. frag = re.escape(frag) pats.append(frag.replace(r'\*', '[^.]*')) return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules are mostly followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_to_pat(value).match(hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. 
if key == 'commonName': if _dnsname_to_pat(value).match(hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found") try: from types import SimpleNamespace as Container except ImportError: class Container(object): """ A generic container for when multiple values need to be returned """ def __init__(self, **kwargs): self.__dict__.update(kwargs) try: from shutil import which except ImportError: # Implementation from Python 3.3 def which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if not os.curdir in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. 
pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path extensions. # This will allow us to short circuit when given "python.exe". # If it does match, only test that one, otherwise we have to try # others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if not normdir in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None # ZipFile is a context manager in 2.7, but not in 2.6 from zipfile import ZipFile as BaseZipFile if hasattr(BaseZipFile, '__enter__'): ZipFile = BaseZipFile else: from zipfile import ZipExtFile as BaseZipExtFile class ZipExtFile(BaseZipExtFile): def __init__(self, base): self.__dict__.update(base.__dict__) def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate class ZipFile(BaseZipFile): def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate def open(self, *args, **kwargs): base = BaseZipFile.open(self, *args, **kwargs) return ZipExtFile(base) try: from platform import python_implementation except ImportError: # pragma: no cover def python_implementation(): """Return a string identifying the Python implementation.""" if 'PyPy' in sys.version: return 'PyPy' if os.name == 'java': return 'Jython' if sys.version.startswith('IronPython'): return 'IronPython' return 'CPython' try: import sysconfig except ImportError: # pragma: no cover from ._backport import sysconfig try: callable = callable except NameError: # pragma: no cover from collections import Callable def 
callable(obj): return isinstance(obj, Callable) try: fsencode = os.fsencode fsdecode = os.fsdecode except AttributeError: # pragma: no cover _fsencoding = sys.getfilesystemencoding() if _fsencoding == 'mbcs': _fserrors = 'strict' else: _fserrors = 'surrogateescape' def fsencode(filename): if isinstance(filename, bytes): return filename elif isinstance(filename, text_type): return filename.encode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) def fsdecode(filename): if isinstance(filename, text_type): return filename elif isinstance(filename, bytes): return filename.decode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) try: from tokenize import detect_encoding except ImportError: # pragma: no cover from codecs import BOM_UTF8, lookup import re cookie_re = re.compile("coding[:=]\s*([-\w.]+)") def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argment, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. 
If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) matches = cookie_re.findall(line_string) if not matches: return None encoding = _get_normal_name(matches[0]) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] # For converting & <-> &amp; etc. 
try: from html import escape except ImportError: from cgi import escape if sys.version_info[:2] < (3, 4): unescape = HTMLParser().unescape else: from html import unescape try: from collections import ChainMap except ImportError: # pragma: no cover from collections import MutableMapping try: from reprlib import recursive_repr as _recursive_repr except ImportError: def _recursive_repr(fillvalue='...'): ''' Decorator to make a repr function return fillvalue for a recursive call ''' def decorating_function(user_function): repr_running = set() def wrapper(self): key = id(self), get_ident() if key in repr_running: return fillvalue repr_running.add(key) try: result = user_function(self) finally: repr_running.discard(key) return result # Can't use functools.wraps() here because of bootstrap issues wrapper.__module__ = getattr(user_function, '__module__') wrapper.__doc__ = getattr(user_function, '__doc__') wrapper.__name__ = getattr(user_function, '__name__') wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) return wrapper return decorating_function class ChainMap(MutableMapping): ''' A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. ''' def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. 
''' self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) def __getitem__(self, key): for mapping in self.maps: try: return mapping[key] # can't use 'key in mapping' with defaultdict except KeyError: pass return self.__missing__(key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): return len(set().union(*self.maps)) # reuses stored hash values if possible def __iter__(self): return iter(set().union(*self.maps)) def __contains__(self, key): return any(key in m for m in self.maps) def __bool__(self): return any(self.maps) @_recursive_repr() def __repr__(self): return '{0.__class__.__name__}({1})'.format( self, ', '.join(map(repr, self.maps))) @classmethod def fromkeys(cls, iterable, *args): 'Create a ChainMap with a single dict created from the iterable.' return cls(dict.fromkeys(iterable, *args)) def copy(self): 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy def new_child(self): # like Django's Context.push() 'New ChainMap with a new dict followed by all previous maps.' return self.__class__({}, *self.maps) @property def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' return self.__class__(*self.maps[1:]) def __setitem__(self, key, value): self.maps[0][key] = value def __delitem__(self, key): try: del self.maps[0][key] except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' try: return self.maps[0].popitem() except KeyError: raise KeyError('No keys found in the first mapping.') def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' 
try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def clear(self): 'Clear maps[0], leaving maps[1:] intact.' self.maps[0].clear() try: from imp import cache_from_source except ImportError: # pragma: no cover def cache_from_source(path, debug_override=None): assert path.endswith('.py') if debug_override is None: debug_override = __debug__ if debug_override: suffix = 'c' else: suffix = 'o' return path + suffix try: from collections import OrderedDict except ImportError: # pragma: no cover ## {{{ http://code.activestate.com/recipes/576693/ (r9) # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. 
''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running=None): 'od.__repr__() <==> repr(od)' if not _repr_running: _repr_running = {} call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self) try: from logging.config import BaseConfigurator, valid_ident except ImportError: # pragma: no cover IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError('Not a valid Python identifier: %r' % s) return True # The ConvertingXXX classes are wrappers around standard Python containers, # and they serve to convert any suitable values in the container. 
The # conversion converts base dicts, lists and tuples to their wrapped # equivalents, whereas strings which match a conversion format are converted # appropriately. # # Each wrapper should have a configurator attribute holding the actual # configurator to use for conversion. class ConvertingDict(dict): """A converting dictionary wrapper.""" def __getitem__(self, key): value = dict.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def get(self, key, default=None): value = dict.get(self, key, default) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, key, default=None): value = dict.pop(self, key, default) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class ConvertingList(list): """A converting list wrapper.""" def __getitem__(self, key): value = list.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, idx=-1): value = list.pop(self, idx) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self return result class ConvertingTuple(tuple): """A converting tuple wrapper.""" def __getitem__(self, key): value = 
tuple.__getitem__(self, key) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class BaseConfigurator(object): """ The configurator base class which defines some useful defaults. """ CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') DIGIT_PATTERN = re.compile(r'^\d+$') value_converters = { 'ext' : 'ext_convert', 'cfg' : 'cfg_convert', } # We might want to use a different one, e.g. importlib importer = staticmethod(__import__) def __init__(self, config): self.config = ConvertingDict(config) self.config.configurator = self def resolve(self, s): """ Resolve strings to objects using standard import and attribute syntax. """ name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' 
+ frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v def ext_convert(self, value): """Default converter for the ext:// protocol.""" return self.resolve(value) def cfg_convert(self, value): """Default converter for the cfg:// protocol.""" rest = value m = self.WORD_PATTERN.match(rest) if m is None: raise ValueError("Unable to convert %r" % value) else: rest = rest[m.end():] d = self.config[m.groups()[0]] #print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: d = d[m.groups()[0]] else: m = self.INDEX_PATTERN.match(rest) if m: idx = m.groups()[0] if not self.DIGIT_PATTERN.match(idx): d = d[idx] else: try: n = int(idx) # try as number first (most likely) d = d[n] except TypeError: d = d[idx] if m: rest = rest[m.end():] else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) #rest should be empty return d def convert(self, value): """ Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. 
""" if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, string_types): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value def configure_custom(self, config): """Configure an object with a user-supplied factory.""" c = config.pop('()') if not callable(c): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) result = c(**kwargs) if props: for name, value in props.items(): setattr(result, name, value) return result def as_tuple(self, value): """Utility function which converts lists to tuples.""" if isinstance(value, list): value = tuple(value) return value
gpl-3.0
flavour/cedarbluff
tests/selenium/scripts/warehouseTest.py
6
7344
from sahanaTest import SahanaTest import unittest, re, time class WarehouseTest(SahanaTest): """ Test the Warehouse component of the Inventory Management System """ _sortList = ("addWarehouses", "stockWarehouse", #"removeWarehouses", ) def firstRun(self): WarehouseTest.warehouses = [] WarehouseTest.orgs = [] # Setup the template for the Warehouse From WarehouseTest.warehouseCreateTemplate = self.action.getFormTemplate() WarehouseTest.warehouseCreateTemplate.addInput(labelID="org_office_name__label", inputID="org_office_name") def createWarehouse(self, name, organisation, country, state=None, district=None, lat=None, lon=None, address=None, phone=None, email=None, comment=None): sel = self.selenium name = name.strip() organisation = organisation.strip() country = country.strip() state = state.strip() district = district.strip() lat = lat.strip() lon = lon.strip() address = address.strip() phone = phone.strip() email = email.strip() comment = comment.strip() if organisation not in self.orgs: self.action.openPage("org/organisation") matches = self.action.searchMatchesFound(organisation) if matches == 0: self.createOrganisation(organisation) self.orgs.append(organisation) self.action.openPage("inv/warehouse/create") self.assertEqual("Add Warehouse", sel.get_text("//h2")) self.action.fillForm("org_office_name", name) self.action.fillAutoComplete("dummy_org_office_organisation_id", organisation) self.action.fillForm("gis_location_L0", country, "select") self.action.fillForm("gis_location_L1", state) self.action.fillForm("gis_location_L2", district) self.action.fillForm("gis_location_lat", lat) self.action.fillForm("gis_location_street", address) self.action.fillForm("gis_location_lon", lon) self.action.fillForm("org_office_phone1", phone) self.action.fillForm("org_office_email", email) self.action.fillForm("org_office_comments", comment) # Now save the form self.assertTrue(self.action.saveForm("Save", "Warehouse added")) print "Warehouse %s created" % name def 
createOrganisation(self, name): sel = self.selenium name = name.strip() self.action.openPage("org/organisation/create") self.assertEqual("Add Organization", sel.get_text("//h2")) self.action.fillForm("org_organisation_name", name) # Now save the form self.assertTrue(self.action.saveForm("Save", "Organization added")) print "Organization %s created" % name def addWarehouses(self): # Log in as admin an then move to the add warehouse page self.useSahanaAdminAccount() self.action.login(self._user, self._password) # Add the test warehouses source = open("../data/warehouse.txt", "r") values = source.readlines() source.close() self.action.openPage("inv/warehouse") for warehouse in values: details = warehouse.split(",") if len(details) == 11: name = details[0].strip() matches = self.action.searchMatchesFound(name) if matches == 0: self.createWarehouse(name, details[1].strip(), details[2].strip(), details[3].strip(), details[4].strip(), details[5].strip(), details[6].strip(), details[7].strip(), details[8].strip(), details[9].strip(), details[10].strip(), ) self.warehouses.append(name) def addItem(self, warehouse, item, size, quantity, expireDate, comments): """ Add an Item to a Warehouse @ToDo: Currently this does it by going to a URL without a menu entry This should eb changed to doing it via the Warehouse's Inventory Items tab """ sel = self.selenium self.action.openPage("inv/inv_item") sel.click("show-add-btn") self.assertEqual("List Items in Inventory", sel.get_text("//h2")) self.action.fillForm("inv_inv_item_site_id", warehouse, "select") self.action.fillForm("inv_inv_item_item_id", item, "select") # Pause to let the select box be filled up for i in range(30): if size in sel.get_text("inv_inv_item_item_pack_id"): break time.sleep(1) self.action.fillForm("inv_inv_item_item_pack_id", size, "select") self.action.fillForm("inv_inv_item_quantity", quantity) self.action.fillForm("inv_inv_item_expiry_date", expireDate) self.action.fillForm("inv_inv_item_comments", comments) 
# Now save the form self.assertTrue(self.action.saveForm("Save", "Item added to Inventory")) def stockWarehouse(self): # Log in as admin an then move read the inventory item file self.useSahanaAdminAccount() self.action.login(self._user, self._password) # Add the test warehouses source = open("../data/inventoryItems.txt", "r") values = source.readlines() source.close() # For each item add it to the warehouse for items in values: details = items.split(",") if len(details) == 6: self.addItem(details[0].strip(), details[1].strip(), details[2].strip(), details[3].strip(), details[4].strip(), details[5].strip(), ) # Fails to logout cleanly when not in lastRun #def removeWarehouses(self): def lastRun(self): """ Delete the test warehouses """ if len(self.warehouses) == 0: return sel = self.selenium self.useSahanaAdminAccount() self.action.login(self._user, self._password) self.action.openPage("inv/warehouse") allPassed = True for warehouse in self.warehouses: self.action.searchUnique(warehouse) sel.click("link=Delete") self.action.confirmDelete() if self.action.successMsg("Warehouse deleted"): print "Warehouse %s deleted" % warehouse else: print "Failed to deleted warehouse %s" % warehouse allPassed = False self.assertTrue(allPassed) if __name__ == "__main__": SahanaTest.setUpHierarchy() unittest.main() WarehouseTest.selenium.stop()
mit
DOAJ/doaj
portality/formcontext/render.py
1
23506
# DEPRECATED - this file is dead, and should be removed by the end of the redesign project from portality.formcontext.formhelper import FormHelperBS3 from portality.formcontext.choices import Choices from copy import deepcopy class Renderer(object): def __init__(self): self.FIELD_GROUPS = {} self.fh = FormHelperBS3() self._error_fields = [] self._disabled_fields = [] self._disable_all_fields = False self._highlight_completable_fields = False def check_field_group_exists(self, field_group_name): """ Return true if the field group exists in this form """ group_def = self.FIELD_GROUPS.get(field_group_name) if group_def is None: return False else: return True def render_field_group(self, form_context, field_group_name=None, group_cfg=None): if field_group_name is None: return self._render_all(form_context) # get the group definition group_def = self.FIELD_GROUPS.get(field_group_name) if group_def is None: return "" # build the frag frag = "" for entry in group_def: field_name = list(entry.keys())[0] config = entry.get(field_name) config = deepcopy(config) config = self._rewrite_extra_fields(form_context, config) field = form_context.form[field_name] if field_name in self.disabled_fields or self._disable_all_fields is True: config["disabled"] = "disabled" if self._highlight_completable_fields is True: valid = field.validate(form_context.form) config["complete_me"] = not valid if group_cfg is not None: config.update(group_cfg) frag += self.fh.render_field(field, **config) return frag @property def error_fields(self): return self._error_fields def set_error_fields(self, fields): self._error_fields = fields @property def disabled_fields(self): return self._disabled_fields def set_disabled_fields(self, fields): self._disabled_fields = fields def disable_all_fields(self, disable): self._disable_all_fields = disable def _rewrite_extra_fields(self, form_context, config): if "extra_input_fields" in config: config = deepcopy(config) for opt, field_ref in 
config.get("extra_input_fields").items(): extra_field = form_context.form[field_ref] config["extra_input_fields"][opt] = extra_field return config def _render_all(self, form_context): frag = "" for field in form_context.form: frag += self.fh.render_field(form_context, field.short_name) return frag def find_field(self, field, field_group): for index, item in enumerate(self.FIELD_GROUPS[field_group]): if field in item: return index def insert_field_after(self, field_to_insert, after_this_field, field_group): self.FIELD_GROUPS[field_group].insert( self.find_field(after_this_field, field_group) + 1, field_to_insert ) class BasicJournalInformationRenderer(Renderer): def __init__(self): super(BasicJournalInformationRenderer, self).__init__() # allow the subclass to define the order the groups should be considered in. This is useful for # numbering questions and determining first errors self.NUMBERING_ORDER = ["basic_info", "editorial_process", "openness", "content_licensing", "copyright"] self.ERROR_CHECK_ORDER = deepcopy(self.NUMBERING_ORDER) # define the basic field groups self.FIELD_GROUPS = { "basic_info" : [ {"title" : {"class": "input-xlarge"}}, {"url" : {"class": "input-xlarge"}}, {"alternative_title" : {"class": "input-xlarge"}}, {"pissn" : {"class": "input-small", "size": "9", "maxlength": "9"}}, {"eissn" : {"class": "input-small", "size": "9", "maxlength": "9"}}, {"publisher" : {"class": "input-xlarge"}}, {"society_institution" : {"class": "input-xlarge"}}, {"platform" : {"class": "input-xlarge"}}, {"contact_name" : {}}, {"contact_email" : {}}, {"confirm_contact_email" : {}}, {"country" : {"class": "input-large"}}, {"processing_charges" : {}}, {"processing_charges_url" : {"class": "input-xlarge"}}, {"processing_charges_amount" : {"class": "input-mini"}}, {"processing_charges_currency" : {"class": "input-large"}}, {"submission_charges" : {}}, {"submission_charges_url" : {"class": "input-xlarge"}}, {"submission_charges_amount" : {"class": "input-mini"}}, 
{"submission_charges_currency" : {"class": "input-large"}}, {"waiver_policy" : {}}, {"waiver_policy_url" : {"class": "input-xlarge"}}, { "digital_archiving_policy" : { "extra_input_fields" : { Choices.digital_archiving_policy_val("other") : "digital_archiving_policy_other", Choices.digital_archiving_policy_val("library") : "digital_archiving_policy_library" } } }, {"digital_archiving_policy_url" : {"class": "input-xlarge"}}, {"crawl_permission" : {}}, { "article_identifiers" : { "extra_input_fields": { Choices.article_identifiers_val("other") : "article_identifiers_other" } } }, {"download_statistics" : {}}, {"download_statistics_url" : {"class": "input-xlarge"}}, {"first_fulltext_oa_year" : {"class": "input-mini"}}, { "fulltext_format" : { "extra_input_fields": { Choices.fulltext_format_val("other") : "fulltext_format_other" } } }, {"keywords" : {"class": "input-xlarge"}}, {"languages" : {"class": "input-xlarge"}} ], "editorial_process" : [ {"editorial_board_url" : {"class": "input-xlarge"}}, {"review_process" : {"class" : "form-control input-xlarge"}}, {"review_process_url" : {"class": "input-xlarge"}}, {"aims_scope_url" : {"class": "input-xlarge"}}, {"instructions_authors_url" : {"class": "input-xlarge"}}, {"plagiarism_screening" : {}}, {"plagiarism_screening_url" : {"class": "input-xlarge"}}, {"publication_time" : {"class": "input-tiny"}} ], "openness" : [ {"oa_statement_url" : {"class": "input-xlarge"}} ], "content_licensing" : [ {"license_embedded" : {}}, {"license_embedded_url" : {"class": "input-xlarge"}}, { "license" : { "extra_input_fields": { Choices.licence_val("other") : "license_other" } } }, {"license_checkbox" : {}}, {"license_url" : {"class": "input-xlarge"}}, {"open_access" : {}}, { "deposit_policy" : { "extra_input_fields": { Choices.open_access_val("other") : "deposit_policy_other" } } } ], "copyright" : [ { "copyright" : {} }, {"copyright_url" : {"class": "input-xlarge"}}, { "publishing_rights" : {} }, {"publishing_rights_url" : {"class": 
"input-xlarge"}} ] } def check_field_groups(self): ''' Check whether field groups which are being referenced in various renderer lists actually exist. Should only be called in self.__init__ by non-abstract classes, i.e. the bottom of the inheritance tree, the ones that would actually get used to render forms. Otherwise the check becomes meaningless (and always fails) as it will check whether all groups are defined in a class that isn't supposed to have all the definitions - being abstract, it may only have a few common ones. ''' for group in self.NUMBERING_ORDER: try: self.FIELD_GROUPS[group] except KeyError as e: raise KeyError( 'Can\'t number a group which does not exist. ' 'Field group "{0}" is not defined in self.FIELD_GROUPS ' 'but is present in self.NUMBERING_ORDER. ' 'This is in renderer {1}.'.format(str(e), self.__class__.__name__) ) for group in self.ERROR_CHECK_ORDER: try: self.FIELD_GROUPS[group] except KeyError as e: raise KeyError( 'Can\'t check a group which does not exist for errors. ' 'Field group "{0}" is not defined in self.FIELD_GROUPS ' 'but is present in self.ERROR_CHECK_ORDER. ' 'This is in renderer {1}.'.format(str(e), self.__class__.__name__) ) def number_questions(self): q = 1 for g in self.NUMBERING_ORDER: cfg = self.FIELD_GROUPS.get(g) for obj in cfg: field = list(obj.keys())[0] obj[field]["q_num"] = str(q) q += 1 def question_number(self, field): for g in self.FIELD_GROUPS: cfg = self.FIELD_GROUPS.get(g) for obj in cfg: f = list(obj.keys())[0] if f == field and "q_num" in obj[f]: return obj[f]["q_num"] return "" def set_error_fields(self, fields): super(BasicJournalInformationRenderer, self).set_error_fields(fields) # find the first error in the form and tag it found = False for g in self.ERROR_CHECK_ORDER: cfg = self.FIELD_GROUPS.get(g) # If a group is specified as part of the error checks but is # not defined in self.FIELD_GROUPS then do not try to check # it for errors - there are no fields to check. 
if cfg: for obj in cfg: field = list(obj.keys())[0] if field in self.error_fields: obj[field]["first_error"] = True found = True break if found: break class ApplicationRenderer(BasicJournalInformationRenderer): def __init__(self): super(ApplicationRenderer, self).__init__() # allow the subclass to define the order the groups should be considered in. This is useful for # numbering questions and determining first errors self.NUMBERING_ORDER.append("submitter_info") self.ERROR_CHECK_ORDER = deepcopy(self.NUMBERING_ORDER) # in this case these can be the same self.FIELD_GROUPS["submitter_info"] = [ {"suggester_name" : {"label_width" : 5}}, {"suggester_email" : {"label_width" : 5, "class": "input-xlarge"}}, {"suggester_email_confirm" : {"label_width" : 5, "class": "input-xlarge"}}, ] self.insert_field_after( field_to_insert={"articles_last_year" : {"class": "input-mini"}}, after_this_field="submission_charges_currency", field_group="basic_info" ) self.insert_field_after( field_to_insert={"articles_last_year_url" : {"class": "input-xlarge"}}, after_this_field="articles_last_year", field_group="basic_info" ) self.insert_field_after( field_to_insert={"metadata_provision" : {}}, after_this_field="article_identifiers", field_group="basic_info" ) class PublicApplicationRenderer(ApplicationRenderer): def __init__(self): super(PublicApplicationRenderer, self).__init__() # explicitly call number questions, as it is not called by default (because other implementations may want # to mess with the group order and field groups first) self.number_questions() self.check_field_groups() class PublisherUpdateRequestRenderer(ApplicationRenderer): def __init__(self): super(PublisherUpdateRequestRenderer, self).__init__() self.NUMBERING_ORDER.remove("submitter_info") self.ERROR_CHECK_ORDER = deepcopy(self.NUMBERING_ORDER) del self.FIELD_GROUPS["submitter_info"] # explicitly call number questions, as it is not called by default (because other implementations may want # to mess with the group 
order and field groups first self.number_questions() self.check_field_groups() self._highlight_completable_fields = True class PublisherUpdateRequestReadOnlyRenderer(ApplicationRenderer): def __init__(self): super(PublisherUpdateRequestReadOnlyRenderer, self).__init__() self.ERROR_CHECK_ORDER = [] self.number_questions() self.check_field_groups() class ManEdApplicationReviewRenderer(ApplicationRenderer): def __init__(self): super(ManEdApplicationReviewRenderer, self).__init__() # extend the list of field groups self.FIELD_GROUPS["status"] = [ {"application_status" : {"class" : "form-control input-large"}} ] self.FIELD_GROUPS["account"] = [ {"owner" : {"class" : "input-large"}} ] self.FIELD_GROUPS["subject"] = [ {"subject" : {}} ] self.FIELD_GROUPS["editorial"] = [ {"editor_group" : {"class" : "input-large"}}, {"editor" : {"class" : "form-control input-large"}}, ] self.FIELD_GROUPS["notes"] = [ { "notes" : { "render_subfields_horizontal" : True, "container_class" : "deletable", "subfield_display-note" : "8", "subfield_display-date" : "3", "label_width" : 0 } } ] self.FIELD_GROUPS["seal"] = [ {"doaj_seal" : {}} ] self.FIELD_GROUPS["continuations"] = [ {"replaces" : {"class": "input-xlarge"}}, {"is_replaced_by" : {"class": "input-xlarge"}}, {"discontinued_date" : {}} ] self.ERROR_CHECK_ORDER = ["status", "account", "editorial", "continuations", "subject"] + self.ERROR_CHECK_ORDER + ["notes"] # but do NOT include the new groups in self.NUMBERING_ORDER, don"t want them numbered self.number_questions() self.check_field_groups() self._highlight_completable_fields = True class EditorApplicationReviewRenderer(ApplicationRenderer): def __init__(self): super(EditorApplicationReviewRenderer, self).__init__() # extend the list of field groups self.FIELD_GROUPS["status"] = [ {"application_status" : {"class" : "form-control input-large"}} ] self.FIELD_GROUPS["subject"] = [ {"subject" : {}} ] self.FIELD_GROUPS["editorial"] = [ {"editor_group" : {"class" : "input-large"}}, 
{"editor" : {"class" : "form-control input-large"}}, ] self.FIELD_GROUPS["notes"] = [ { "notes" : { "render_subfields_horizontal" : True, "subfield_display-note" : "8", "subfield_display-date" : "3", "label_width" : 0 } } ] self.ERROR_CHECK_ORDER = ["status", "editorial", "subject"] + self.ERROR_CHECK_ORDER + ["notes"] # don"t want the extra groups numbered so not added to self.NUMBERING_ORDER self.number_questions() self.check_field_groups() self._highlight_completable_fields = True class AssEdApplicationReviewRenderer(ApplicationRenderer): def __init__(self): super(AssEdApplicationReviewRenderer, self).__init__() # extend the list of field groups self.FIELD_GROUPS["status"] = [ {"application_status" : {"class" : "form-control input-large"}} ] self.FIELD_GROUPS["subject"] = [ {"subject" : {}} ] self.FIELD_GROUPS["notes"] = [ { "notes" : { "render_subfields_horizontal" : True, "subfield_display-note" : "8", "subfield_display-date" : "3", "label_width" : 0 } } ] self.ERROR_CHECK_ORDER = ["status", "subject"] + self.ERROR_CHECK_ORDER + ["notes"] self.number_questions() self.check_field_groups() self._highlight_completable_fields = True class JournalRenderer(BasicJournalInformationRenderer): def __init__(self): super(JournalRenderer, self).__init__() self.FIELD_GROUPS["subject"] = [ {"subject" : {}} ] self.FIELD_GROUPS["old_journal_fields"] = [ {"author_pays": {}}, {"author_pays_url": {"class": "input-xlarge"}}, {"oa_end_year": {"class": "input-mini"}}, ] def render_field_group(self, form_context, field_group_name=None, **kwargs): if field_group_name == "old_journal_fields": display_old_journal_fields = False for old_field_def in self.FIELD_GROUPS["old_journal_fields"]: old_field_name = list(old_field_def.keys())[0] old_field = getattr(form_context.form, old_field_name) if old_field: if old_field.data and old_field.data != 'None': display_old_journal_fields = True if not display_old_journal_fields: return "" # otherwise let it fall through and render the old journal 
fields return super(JournalRenderer, self).render_field_group(form_context, field_group_name, **kwargs) class ManEdJournalReviewRenderer(JournalRenderer): def __init__(self): super(ManEdJournalReviewRenderer, self).__init__() # extend the list of field groups self.FIELD_GROUPS["account"] = [ {"owner" : {"class" : "input-large"}} ] self.FIELD_GROUPS["editorial"] = [ {"editor_group" : {"class" : "input-large"}}, {"editor" : {"class" : "form-control input-large"}}, ] self.FIELD_GROUPS["notes"] = [ { "notes" : { "render_subfields_horizontal" : True, "container_class" : "deletable", "subfield_display-note" : "8", "subfield_display-date" : "3", "label_width" : 0 } } ] self.FIELD_GROUPS["make_all_fields_optional"] = [ {"make_all_fields_optional": {}} ] self.FIELD_GROUPS["seal"] = [ {"doaj_seal" : {}} ] self.FIELD_GROUPS["continuations"] = [ {"replaces" : {"class": "input-xlarge"}}, {"is_replaced_by" : {"class": "input-xlarge"}}, {"discontinued_date" : {}} ] self.ERROR_CHECK_ORDER = ["make_all_fields_optional", "account", "editorial", "continuations", "subject"] + self.ERROR_CHECK_ORDER + ["notes"] self.number_questions() self.check_field_groups() self._highlight_completable_fields = True class ManEdJournalBulkEditRenderer(Renderer): def __init__(self): super(ManEdJournalBulkEditRenderer, self).__init__() self.FIELD_GROUPS = { "main" : [ {"publisher" : {"class": "input-xlarge"}}, {"platform" : {"class": "input-xlarge"}}, {"country" : {"class": "input-large"}}, {"owner" : {"class" : "input-large"}}, {"contact_name" : {"class" : "input-large"}}, {"contact_email" : {"class" : "input-large"}}, {"doaj_seal" : {"class" : "form-control input-large"}} ] } class EditorJournalReviewRenderer(JournalRenderer): def __init__(self): self.display_old_journal_fields = False # an instance var flag for the template super(EditorJournalReviewRenderer, self).__init__() self.FIELD_GROUPS["editorial"] = [ {"editor_group" : {"class" : "input-large"}}, {"editor" : {"class" : "form-control 
input-large"}}, ] self.FIELD_GROUPS["notes"] = [ { "notes" : { "render_subfields_horizontal" : True, "subfield_display-note" : "8", "subfield_display-date" : "3", "label_width" : 0 } } ] self.ERROR_CHECK_ORDER = ["editorial", "subject"] + self.ERROR_CHECK_ORDER + ["notes"] # don't want the extra groups numbered so not added to self.NUMBERING_ORDER self.number_questions() self.check_field_groups() self._highlight_completable_fields = True class AssEdJournalReviewRenderer(JournalRenderer): def __init__(self): super(AssEdJournalReviewRenderer, self).__init__() # extend the list of field groups self.FIELD_GROUPS["notes"] = [ { "notes" : { "render_subfields_horizontal" : True, "subfield_display-note" : "8", "subfield_display-date" : "3", "label_width" : 0 } } ] self.ERROR_CHECK_ORDER = ["subject"] + self.ERROR_CHECK_ORDER + ["notes"] self.number_questions() self.check_field_groups() self._highlight_completable_fields = True class ReadOnlyJournalRenderer(JournalRenderer): def __init__(self): super(ReadOnlyJournalRenderer, self).__init__() # extend the list of field groups self.FIELD_GROUPS["notes"] = [ { "notes" : { "render_subfields_horizontal" : True, "subfield_display-note" : "8", "subfield_display-date" : "3", "label_width" : 0 } } ] self.ERROR_CHECK_ORDER = [] self.number_questions() self.check_field_groups()
apache-2.0
XuanyuZhao1984/MeanJS_train1
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py
2736
6387
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import gyp.common import gyp.easy_xml as easy_xml #------------------------------------------------------------------------------ class Tool(object): """Visual Studio tool.""" def __init__(self, name, attrs=None): """Initializes the tool. Args: name: Tool name. attrs: Dict of tool attributes; may be None. """ self._attrs = attrs or {} self._attrs['Name'] = name def _GetSpecification(self): """Creates an element for the tool. Returns: A new xml.dom.Element for the tool. """ return ['Tool', self._attrs] class Filter(object): """Visual Studio filter - that is, a virtual folder.""" def __init__(self, name, contents=None): """Initializes the folder. Args: name: Filter (folder) name. contents: List of filenames and/or Filter objects contained. """ self.name = name self.contents = list(contents or []) #------------------------------------------------------------------------------ class Writer(object): """Visual Studio XML project writer.""" def __init__(self, project_path, version, name, guid=None, platforms=None): """Initializes the project. Args: project_path: Path to the project file. version: Format version to emit. name: Name of the project. guid: GUID to use for project, if not None. platforms: Array of string, the supported platforms. If null, ['Win32'] """ self.project_path = project_path self.version = version self.name = name self.guid = guid # Default to Win32 for platforms. if not platforms: platforms = ['Win32'] # Initialize the specifications of the various sections. self.platform_section = ['Platforms'] for platform in platforms: self.platform_section.append(['Platform', {'Name': platform}]) self.tool_files_section = ['ToolFiles'] self.configurations_section = ['Configurations'] self.files_section = ['Files'] # Keep a dict keyed on filename to speed up access. 
self.files_dict = dict() def AddToolFile(self, path): """Adds a tool file to the project. Args: path: Relative path from project to tool file. """ self.tool_files_section.append(['ToolFile', {'RelativePath': path}]) def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools): """Returns the specification for a configuration. Args: config_type: Type of configuration node. config_name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Returns: """ # Handle defaults if not attrs: attrs = {} if not tools: tools = [] # Add configuration node and its attributes node_attrs = attrs.copy() node_attrs['Name'] = config_name specification = [config_type, node_attrs] # Add tool nodes and their attributes if tools: for t in tools: if isinstance(t, Tool): specification.append(t._GetSpecification()) else: specification.append(Tool(t)._GetSpecification()) return specification def AddConfig(self, name, attrs=None, tools=None): """Adds a configuration to the project. Args: name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. """ spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools) self.configurations_section.append(spec) def _AddFilesToNode(self, parent, files): """Adds files and/or filters to the parent node. Args: parent: Destination node files: A list of Filter objects and/or relative paths to files. Will call itself recursively, if the files list contains Filter objects. """ for f in files: if isinstance(f, Filter): node = ['Filter', {'Name': f.name}] self._AddFilesToNode(node, f.contents) else: node = ['File', {'RelativePath': f}] self.files_dict[f] = node parent.append(node) def AddFiles(self, files): """Adds files to the project. Args: files: A list of Filter objects and/or relative paths to files. This makes a copy of the file/filter tree at the time of this call. 
If you later add files to a Filter object which was passed into a previous call to AddFiles(), it will not be reflected in this project. """ self._AddFilesToNode(self.files_section, files) # TODO(rspangler) This also doesn't handle adding files to an existing # filter. That is, it doesn't merge the trees. def AddFileConfig(self, path, config, attrs=None, tools=None): """Adds a configuration to a file. Args: path: Relative path to the file. config: Name of configuration to add. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Raises: ValueError: Relative path does not match any file added via AddFiles(). """ # Find the file node with the right relative path parent = self.files_dict.get(path) if not parent: raise ValueError('AddFileConfig: file "%s" not in project.' % path) # Add the config to the file node spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs, tools) parent.append(spec) def WriteIfChanged(self): """Writes the project file.""" # First create XML content definition content = [ 'VisualStudioProject', {'ProjectType': 'Visual C++', 'Version': self.version.ProjectVersion(), 'Name': self.name, 'ProjectGUID': self.guid, 'RootNamespace': self.name, 'Keyword': 'Win32Proj' }, self.platform_section, self.tool_files_section, self.configurations_section, ['References'], # empty section self.files_section, ['Globals'] # empty section ] easy_xml.WriteXmlIfChanged(content, self.project_path, encoding="Windows-1252")
mit
georgestarcher/TA-SyncKVStore
bin/ta_synckvstore/solnlib/acl.py
6
6169
# Copyright 2016 Splunk, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ''' This module contains interfaces that support CRUD operations on ACL. ''' import json from . import splunk_rest_client as rest_client from .packages.splunklib import binding from .utils import retry __all__ = ['ACLException', 'ACLManager'] class ACLException(Exception): pass class ACLManager(object): '''ACL manager. :param session_key: Splunk access token. :type session_key: ``string`` :param app: App name of namespace. :type app: ``string`` :param owner: (optional) Owner of namespace, default is `nobody`. :type owner: ``string`` :param scheme: (optional) The access scheme, default is None. :type scheme: ``string`` :param host: (optional) The host name, default is None. :type host: ``string`` :param port: (optional) The port number, default is None. :type port: ``integer`` :param context: Other configurations for Splunk rest client. 
:type context: ``dict`` Usage:: >>> import solnlib.acl as sacl >>> saclm = sacl.ACLManager(session_key, 'Splunk_TA_test') >>> saclm.get('data/transforms/extractions') >>> saclm.update('data/transforms/extractions/_acl', perms_read=['*'], perms_write=['*']) ''' def __init__(self, session_key, app, owner='nobody', scheme=None, host=None, port=None, **context): self._rest_client = rest_client.SplunkRestClient(session_key, app, owner=owner, scheme=scheme, host=host, port=port, **context) @retry(exceptions=[binding.HTTPError]) def get(self, path): '''Get ACL of /servicesNS/{`owner`}/{`app`}/{`path`}. :param path: Path of ACL relative to /servicesNS/{`owner`}/{`app`} :type path: ``string`` :returns: A dict contains ACL. :rtype: ``dict`` :raises ACLException: If `path` is invalid. Usage:: >>> aclm = acl.ACLManager(session_key, 'Splunk_TA_test') >>> perms = aclm.get('data/transforms/extractions/_acl') ''' try: content = self._rest_client.get( path, output_mode='json').body.read() except binding.HTTPError as e: if e.status != 404: raise raise ACLException('Invalid endpoint: %s.', path) return json.loads(content)['entry'][0]['acl'] @retry(exceptions=[binding.HTTPError]) def update(self, path, owner=None, perms_read=None, perms_write=None): '''Update ACL of /servicesNS/{`owner`}/{`app`}/{`path`}. If the ACL is per-entity (ends in /acl), owner can be reassigned. If the acl is endpoint-level (ends in _acl), owner will be ignored. The 'sharing' setting is always retrieved from the current. :param path: Path of ACL relative to /servicesNS/{owner}/{app}. MUST end with /acl or /_acl indicating whether the permission is applied at the per-entity level or endpoint level respectively. :type path: ``string`` :param owner: (optional) New owner of ACL, default is `nobody`. :type owner: ``string`` :param perms_read: (optional) List of roles (['*'] for all roles). If unspecified we will POST with current (if available) perms.read, default is None. 
:type perms_read: ``list`` :param perms_write: (optional) List of roles (['*'] for all roles). If unspecified we will POST with current (if available) perms.write, default is None. :type perms_write: ``list`` :returns: A dict contains ACL after update. :rtype: ``dict`` :raises ACLException: If `path` is invalid. Usage:: >>> aclm = acl.ACLManager(session_key, 'Splunk_TA_test') >>> perms = aclm.update('data/transforms/extractions/_acl', perms_read=['admin'], perms_write=['admin']) ''' if not path.endswith('/acl') and not path.endswith('/_acl'): raise ACLException( 'Invalid endpoint: %s, must end with /acl or /_acl.' % path) curr_acl = self.get(path) postargs = {} if perms_read: postargs['perms.read'] = ','.join(perms_read) else: curr_read = curr_acl['perms'].get('read', []) if curr_read: postargs['perms.read'] = ','.join(curr_read) if perms_write: postargs['perms.write'] = ','.join(perms_write) else: curr_write = curr_acl['perms'].get('write', []) if curr_write: postargs['perms.write'] = ','.join(curr_write) if path.endswith('/acl'): # Allow ownership to be reset only at entity level. postargs['owner'] = owner or curr_acl['owner'] postargs['sharing'] = curr_acl['sharing'] try: content = self._rest_client.post( path, body=binding._encode(**postargs), output_mode='json').body.read() except binding.HTTPError as e: if e.status != 404: raise raise ACLException('Invalid endpoint: %s.', path) return json.loads(content)['entry'][0]['acl']
mit
tomchristie/django
tests/template_tests/syntax_tests/test_for.py
25
10025
from django.template import TemplateSyntaxError from django.test import SimpleTestCase from ..utils import setup class ForTagTests(SimpleTestCase): libraries = {'custom': 'template_tests.templatetags.custom'} @setup({'for-tag01': '{% for val in values %}{{ val }}{% endfor %}'}) def test_for_tag01(self): output = self.engine.render_to_string('for-tag01', {'values': [1, 2, 3]}) self.assertEqual(output, '123') @setup({'for-tag02': '{% for val in values reversed %}{{ val }}{% endfor %}'}) def test_for_tag02(self): output = self.engine.render_to_string('for-tag02', {'values': [1, 2, 3]}) self.assertEqual(output, '321') @setup({'for-tag-vars01': '{% for val in values %}{{ forloop.counter }}{% endfor %}'}) def test_for_tag_vars01(self): output = self.engine.render_to_string('for-tag-vars01', {'values': [6, 6, 6]}) self.assertEqual(output, '123') @setup({'for-tag-vars02': '{% for val in values %}{{ forloop.counter0 }}{% endfor %}'}) def test_for_tag_vars02(self): output = self.engine.render_to_string('for-tag-vars02', {'values': [6, 6, 6]}) self.assertEqual(output, '012') @setup({'for-tag-vars03': '{% for val in values %}{{ forloop.revcounter }}{% endfor %}'}) def test_for_tag_vars03(self): output = self.engine.render_to_string('for-tag-vars03', {'values': [6, 6, 6]}) self.assertEqual(output, '321') @setup({'for-tag-vars04': '{% for val in values %}{{ forloop.revcounter0 }}{% endfor %}'}) def test_for_tag_vars04(self): output = self.engine.render_to_string('for-tag-vars04', {'values': [6, 6, 6]}) self.assertEqual(output, '210') @setup({'for-tag-vars05': '{% for val in values %}' '{% if forloop.first %}f{% else %}x{% endif %}{% endfor %}'}) def test_for_tag_vars05(self): output = self.engine.render_to_string('for-tag-vars05', {'values': [6, 6, 6]}) self.assertEqual(output, 'fxx') @setup({'for-tag-vars06': '{% for val in values %}' '{% if forloop.last %}l{% else %}x{% endif %}{% endfor %}'}) def test_for_tag_vars06(self): output = 
self.engine.render_to_string('for-tag-vars06', {'values': [6, 6, 6]}) self.assertEqual(output, 'xxl') @setup({'for-tag-unpack01': '{% for key,value in items %}{{ key }}:{{ value }}/{% endfor %}'}) def test_for_tag_unpack01(self): output = self.engine.render_to_string('for-tag-unpack01', {'items': (('one', 1), ('two', 2))}) self.assertEqual(output, 'one:1/two:2/') @setup({'for-tag-unpack03': '{% for key, value in items %}{{ key }}:{{ value }}/{% endfor %}'}) def test_for_tag_unpack03(self): output = self.engine.render_to_string('for-tag-unpack03', {'items': (('one', 1), ('two', 2))}) self.assertEqual(output, 'one:1/two:2/') @setup({'for-tag-unpack04': '{% for key , value in items %}{{ key }}:{{ value }}/{% endfor %}'}) def test_for_tag_unpack04(self): output = self.engine.render_to_string('for-tag-unpack04', {'items': (('one', 1), ('two', 2))}) self.assertEqual(output, 'one:1/two:2/') @setup({'for-tag-unpack05': '{% for key ,value in items %}{{ key }}:{{ value }}/{% endfor %}'}) def test_for_tag_unpack05(self): output = self.engine.render_to_string('for-tag-unpack05', {'items': (('one', 1), ('two', 2))}) self.assertEqual(output, 'one:1/two:2/') @setup({'for-tag-unpack06': '{% for key value in items %}{{ key }}:{{ value }}/{% endfor %}'}) def test_for_tag_unpack06(self): msg = "'for' tag received an invalid argument: for key value in items" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('for-tag-unpack06', {'items': (('one', 1), ('two', 2))}) @setup({'for-tag-unpack07': '{% for key,,value in items %}{{ key }}:{{ value }}/{% endfor %}'}) def test_for_tag_unpack07(self): msg = "'for' tag received an invalid argument: for key,,value in items" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('for-tag-unpack07', {'items': (('one', 1), ('two', 2))}) @setup({'for-tag-unpack08': '{% for key,value, in items %}{{ key }}:{{ value }}/{% endfor %}'}) def test_for_tag_unpack08(self): msg = "'for' tag 
received an invalid argument: for key,value, in items" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('for-tag-unpack08', {'items': (('one', 1), ('two', 2))}) @setup({'double-quote': '{% for "k" in items %}{{ "k" }}/{% endfor %}'}) def test_unpack_double_quote(self): msg = """'for' tag received an invalid argument: for "k" in items""" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('double-quote', {'items': (1, 2)}) @setup({'single-quote': "{% for 'k' in items %}{{ k }}/{% endfor %}"}) def test_unpack_single_quote(self): msg = """'for' tag received an invalid argument: for 'k' in items""" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('single-quote', {'items': (1, 2)}) @setup({'vertical-bar': '{% for k|upper in items %}{{ k|upper }}/{% endfor %}'}) def test_unpack_vertical_bar(self): msg = "'for' tag received an invalid argument: for k|upper in items" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('vertical-bar', {'items': (1, 2)}) @setup({'for-tag-unpack09': '{% for val in items %}{{ val.0 }}:{{ val.1 }}/{% endfor %}'}) def test_for_tag_unpack09(self): """ A single loopvar doesn't truncate the list in val. 
""" output = self.engine.render_to_string('for-tag-unpack09', {'items': (('one', 1), ('two', 2))}) self.assertEqual(output, 'one:1/two:2/') @setup({'for-tag-unpack13': '{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}'}) def test_for_tag_unpack13(self): output = self.engine.render_to_string( 'for-tag-unpack13', {'items': (('one', 1, 'carrot'), ('two', 2, 'cheese'))} ) if self.engine.string_if_invalid: self.assertEqual(output, 'one:1,carrot/two:2,cheese/') else: self.assertEqual(output, 'one:1,carrot/two:2,cheese/') @setup({'for-tag-empty01': '{% for val in values %}{{ val }}{% empty %}empty text{% endfor %}'}) def test_for_tag_empty01(self): output = self.engine.render_to_string('for-tag-empty01', {'values': [1, 2, 3]}) self.assertEqual(output, '123') @setup({'for-tag-empty02': '{% for val in values %}{{ val }}{% empty %}values array empty{% endfor %}'}) def test_for_tag_empty02(self): output = self.engine.render_to_string('for-tag-empty02', {'values': []}) self.assertEqual(output, 'values array empty') @setup({'for-tag-empty03': '{% for val in values %}' '{{ val }}{% empty %}values array not found{% endfor %}'}) def test_for_tag_empty03(self): output = self.engine.render_to_string('for-tag-empty03') self.assertEqual(output, 'values array not found') @setup({'for-tag-filter-ws': "{% load custom %}{% for x in s|noop:'x y' %}{{ x }}{% endfor %}"}) def test_for_tag_filter_ws(self): """ #19882 """ output = self.engine.render_to_string('for-tag-filter-ws', {'s': 'abc'}) self.assertEqual(output, 'abc') @setup({'for-tag-unpack-strs': '{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}'}) def test_for_tag_unpack_strs(self): output = self.engine.render_to_string('for-tag-unpack-strs', {'items': ('ab', 'ac')}) self.assertEqual(output, 'a:b/a:c/') @setup({'for-tag-unpack10': '{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}'}) def test_for_tag_unpack10(self): with self.assertRaisesMessage(ValueError, 'Need 2 values to unpack in for loop; got 3.'): 
self.engine.render_to_string( 'for-tag-unpack10', {'items': (('one', 1, 'carrot'), ('two', 2, 'orange'))}, ) @setup({'for-tag-unpack11': '{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}'}) def test_for_tag_unpack11(self): with self.assertRaisesMessage(ValueError, 'Need 3 values to unpack in for loop; got 2.'): self.engine.render_to_string( 'for-tag-unpack11', {'items': (('one', 1), ('two', 2))}, ) @setup({'for-tag-unpack12': '{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}'}) def test_for_tag_unpack12(self): with self.assertRaisesMessage(ValueError, 'Need 3 values to unpack in for loop; got 2.'): self.engine.render_to_string( 'for-tag-unpack12', {'items': (('one', 1, 'carrot'), ('two', 2))} ) @setup({'for-tag-unpack14': '{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}'}) def test_for_tag_unpack14(self): with self.assertRaisesMessage(ValueError, 'Need 2 values to unpack in for loop; got 1.'): self.engine.render_to_string('for-tag-unpack14', {'items': (1, 2)}) @setup({ 'main': '{% with alpha=alpha.values %}{% include "base" %}{% endwith %}_' '{% with alpha=alpha.extra %}{% include "base" %}{% endwith %}', 'base': '{% for x, y in alpha %}{{ x }}:{{ y }},{% endfor %}' }) def test_for_tag_context(self): """ ForNode.render() pops the values it pushes to the context (#28001). """ output = self.engine.render_to_string('main', { 'alpha': { 'values': [('two', 2), ('four', 4)], 'extra': [('six', 6), ('eight', 8)], }, }) self.assertEqual(output, 'two:2,four:4,_six:6,eight:8,')
bsd-3-clause
doganov/edx-platform
common/djangoapps/third_party_auth/middleware.py
169
1053
"""Middleware classes for third_party_auth.""" from social.apps.django_app.middleware import SocialAuthExceptionMiddleware from . import pipeline class ExceptionMiddleware(SocialAuthExceptionMiddleware): """Custom middleware that handles conditional redirection.""" def get_redirect_uri(self, request, exception): # Fall back to django settings's SOCIAL_AUTH_LOGIN_ERROR_URL. redirect_uri = super(ExceptionMiddleware, self).get_redirect_uri(request, exception) # Safe because it's already been validated by # pipeline.parse_query_params. If that pipeline step ever moves later # in the pipeline stack, we'd need to validate this value because it # would be an injection point for attacker data. auth_entry = request.session.get(pipeline.AUTH_ENTRY_KEY) # Check if we have an auth entry key we can use instead if auth_entry and auth_entry in pipeline.AUTH_DISPATCH_URLS: redirect_uri = pipeline.AUTH_DISPATCH_URLS[auth_entry] return redirect_uri
agpl-3.0
Ernesto99/odoo
openerp/modules/module.py
199
16207
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import functools import imp import importlib import inspect import itertools import logging import os import re import sys import time import unittest import threading from os.path import join as opj import unittest2 import openerp import openerp.tools as tools import openerp.release as release from openerp.tools.safe_eval import safe_eval as eval MANIFEST = '__openerp__.py' README = ['README.rst', 'README.md', 'README.txt'] _logger = logging.getLogger(__name__) # addons path as a list ad_paths = [] hooked = False # Modules already loaded loaded = [] class AddonsImportHook(object): """ Import hook to load OpenERP addons from multiple paths. OpenERP implements its own import-hook to load its addons. OpenERP addons are Python modules. Originally, they were each living in their own top-level namespace, e.g. the sale module, or the hr module. For backward compatibility, `import <module>` is still supported. Now they are living in `openerp.addons`. 
The good way to import such modules is thus `import openerp.addons.module`. """ def find_module(self, module_name, package_path): module_parts = module_name.split('.') if len(module_parts) == 3 and module_name.startswith('openerp.addons.'): return self # We act as a loader too. def load_module(self, module_name): if module_name in sys.modules: return sys.modules[module_name] _1, _2, module_part = module_name.split('.') # Note: we don't support circular import. f, path, descr = imp.find_module(module_part, ad_paths) mod = imp.load_module('openerp.addons.' + module_part, f, path, descr) sys.modules['openerp.addons.' + module_part] = mod return mod def initialize_sys_path(): """ Setup an import-hook to be able to import OpenERP addons from the different addons paths. This ensures something like ``import crm`` (or even ``import openerp.addons.crm``) works even if the addons are not in the PYTHONPATH. """ global ad_paths global hooked dd = tools.config.addons_data_dir if dd not in ad_paths: ad_paths.append(dd) for ad in tools.config['addons_path'].split(','): ad = os.path.abspath(tools.ustr(ad.strip())) if ad not in ad_paths: ad_paths.append(ad) # add base module path base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'addons')) if base_path not in ad_paths: ad_paths.append(base_path) if not hooked: sys.meta_path.append(AddonsImportHook()) hooked = True def get_module_path(module, downloaded=False, display_warning=True): """Return the path of the given module. Search the addons paths and return the first path where the given module is found. If downloaded is True, return the default addons path if nothing else is found. 
""" initialize_sys_path() for adp in ad_paths: if os.path.exists(opj(adp, module)) or os.path.exists(opj(adp, '%s.zip' % module)): return opj(adp, module) if downloaded: return opj(tools.config.addons_data_dir, module) if display_warning: _logger.warning('module %s: module not found', module) return False def get_module_filetree(module, dir='.'): path = get_module_path(module) if not path: return False dir = os.path.normpath(dir) if dir == '.': dir = '' if dir.startswith('..') or (dir and dir[0] == '/'): raise Exception('Cannot access file outside the module') files = openerp.tools.osutil.listdir(path, True) tree = {} for f in files: if not f.startswith(dir): continue if dir: f = f[len(dir)+int(not dir.endswith('/')):] lst = f.split(os.sep) current = tree while len(lst) != 1: current = current.setdefault(lst.pop(0), {}) current[lst.pop(0)] = None return tree def get_module_resource(module, *args): """Return the full path of a resource of the given module. :param module: module name :param list(str) args: resource path components within module :rtype: str :return: absolute path to the resource TODO name it get_resource_path TODO make it available inside on osv object (self.get_resource_path) """ mod_path = get_module_path(module) if not mod_path: return False resource_path = opj(mod_path, *args) if os.path.isdir(mod_path): # the module is a directory - ignore zip behavior if os.path.exists(resource_path): return resource_path return False def get_module_icon(module): iconpath = ['static', 'description', 'icon.png'] if get_module_resource(module, *iconpath): return ('/' + module + '/') + '/'.join(iconpath) return '/base/' + '/'.join(iconpath) def get_module_root(path): """ Get closest module's root begining from path # Given: # /foo/bar/module_dir/static/src/... 
get_module_root('/foo/bar/module_dir/static/') # returns '/foo/bar/module_dir' get_module_root('/foo/bar/module_dir/') # returns '/foo/bar/module_dir' get_module_root('/foo/bar') # returns None @param path: Path from which the lookup should start @return: Module root path or None if not found """ while not os.path.exists(os.path.join(path, MANIFEST)): new_path = os.path.abspath(os.path.join(path, os.pardir)) if path == new_path: return None path = new_path return path def load_information_from_description_file(module, mod_path=None): """ :param module: The name of the module (sale, purchase, ...) :param mod_path: Physical path of module, if not providedThe name of the module (sale, purchase, ...) """ if not mod_path: mod_path = get_module_path(module) terp_file = mod_path and opj(mod_path, MANIFEST) or False if terp_file: info = {} if os.path.isfile(terp_file): # default values for descriptor info = { 'application': False, 'author': '', 'auto_install': False, 'category': 'Uncategorized', 'depends': [], 'description': '', 'icon': get_module_icon(module), 'installable': True, 'license': 'AGPL-3', 'post_load': None, 'version': '1.0', 'web': False, 'website': '', 'sequence': 100, 'summary': '', } info.update(itertools.izip( 'depends data demo test init_xml update_xml demo_xml'.split(), iter(list, None))) f = tools.file_open(terp_file) try: info.update(eval(f.read())) finally: f.close() if not info.get('description'): readme_path = [opj(mod_path, x) for x in README if os.path.isfile(opj(mod_path, x))] if readme_path: readme_text = tools.file_open(readme_path[0]).read() info['description'] = readme_text if 'active' in info: # 'active' has been renamed 'auto_install' info['auto_install'] = info['active'] info['version'] = adapt_version(info['version']) return info #TODO: refactor the logger in this file to follow the logging guidelines # for 6.0 _logger.debug('module %s: no %s file found.', module, MANIFEST) return {} def init_module_models(cr, module_name, obj_list): """ 
Initialize a list of models. Call _auto_init and init on each model to create or update the database tables supporting the models. TODO better explanation of _auto_init and init. """ _logger.info('module %s: creating or updating database tables', module_name) todo = [] for obj in obj_list: result = obj._auto_init(cr, {'module': module_name}) if result: todo += result if hasattr(obj, 'init'): obj.init(cr) cr.commit() for obj in obj_list: obj._auto_end(cr, {'module': module_name}) cr.commit() todo.sort(key=lambda x: x[0]) for t in todo: t[1](cr, *t[2]) cr.commit() def load_openerp_module(module_name): """ Load an OpenERP module, if not already loaded. This loads the module and register all of its models, thanks to either the MetaModel metaclass, or the explicit instantiation of the model. This is also used to load server-wide module (i.e. it is also used when there is no model to register). """ global loaded if module_name in loaded: return initialize_sys_path() try: mod_path = get_module_path(module_name) __import__('openerp.addons.' + module_name) # Call the module's post-load hook. This can done before any model or # data has been initialized. This is ok as the post-load hook is for # server-wide (instead of registry-specific) functionalities. info = load_information_from_description_file(module_name) if info['post_load']: getattr(sys.modules['openerp.addons.' 
+ module_name], info['post_load'])() except Exception, e: msg = "Couldn't load module %s" % (module_name) _logger.critical(msg) _logger.critical(e) raise else: loaded.append(module_name) def get_modules(): """Returns the list of module names """ def listdir(dir): def clean(name): name = os.path.basename(name) if name[-4:] == '.zip': name = name[:-4] return name def is_really_module(name): manifest_name = opj(dir, name, MANIFEST) zipfile_name = opj(dir, name) return os.path.isfile(manifest_name) return map(clean, filter(is_really_module, os.listdir(dir))) plist = [] initialize_sys_path() for ad in ad_paths: plist.extend(listdir(ad)) return list(set(plist)) def get_modules_with_version(): modules = get_modules() res = dict.fromkeys(modules, adapt_version('1.0')) for module in modules: try: info = load_information_from_description_file(module) res[module] = info['version'] except Exception: continue return res def adapt_version(version): serie = release.major_version if version == serie or not version.startswith(serie + '.'): version = '%s.%s' % (serie, version) return version def get_test_modules(module): """ Return a list of module for the addons potentially containing tests to feed unittest2.TestLoader.loadTestsFromModule() """ # Try to import the module modpath = 'openerp.addons.' + module try: mod = importlib.import_module('.tests', modpath) except Exception, e: # If module has no `tests` sub-module, no problem. if str(e) != 'No module named tests': _logger.exception('Can not `import %s`.', module) return [] if hasattr(mod, 'fast_suite') or hasattr(mod, 'checks'): _logger.warn( "Found deprecated fast_suite or checks attribute in test module " "%s. These have no effect in or after version 8.0.", mod.__name__) result = [mod_obj for name, mod_obj in inspect.getmembers(mod, inspect.ismodule) if name.startswith('test_')] return result # Use a custom stream object to log the test executions. 
class TestStream(object): def __init__(self, logger_name='openerp.tests'): self.logger = logging.getLogger(logger_name) self.r = re.compile(r'^-*$|^ *... *$|^ok$') def flush(self): pass def write(self, s): if self.r.match(s): return first = True level = logging.ERROR if s.startswith(('ERROR', 'FAIL', 'Traceback')) else logging.INFO for c in s.splitlines(): if not first: c = '` ' + c first = False self.logger.log(level, c) current_test = None def runs_at(test, hook, default): # by default, tests do not run post install test_runs = getattr(test, hook, default) # for a test suite, we're done if not isinstance(test, unittest.TestCase): return test_runs # otherwise check the current test method to see it's been set to a # different state method = getattr(test, test._testMethodName) return getattr(method, hook, test_runs) runs_at_install = functools.partial(runs_at, hook='at_install', default=True) runs_post_install = functools.partial(runs_at, hook='post_install', default=False) def run_unit_tests(module_name, dbname, position=runs_at_install): """ :returns: ``True`` if all of ``module_name``'s tests succeeded, ``False`` if any of them failed. 
:rtype: bool """ global current_test current_test = module_name mods = get_test_modules(module_name) threading.currentThread().testing = True r = True for m in mods: tests = unwrap_suite(unittest2.TestLoader().loadTestsFromModule(m)) suite = unittest2.TestSuite(itertools.ifilter(position, tests)) if suite.countTestCases(): t0 = time.time() t0_sql = openerp.sql_db.sql_counter _logger.info('%s running tests.', m.__name__) result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite) if time.time() - t0 > 5: _logger.log(25, "%s tested in %.2fs, %s queries", m.__name__, time.time() - t0, openerp.sql_db.sql_counter - t0_sql) if not result.wasSuccessful(): r = False _logger.error("Module %s: %d failures, %d errors", module_name, len(result.failures), len(result.errors)) current_test = None threading.currentThread().testing = False return r def unwrap_suite(test): """ Attempts to unpack testsuites (holding suites or cases) in order to generate a single stream of terminals (either test cases or customized test suites). These can then be checked for run/skip attributes individually. An alternative would be to use a variant of @unittest2.skipIf with a state flag of some sort e.g. @unittest2.skipIf(common.runstate != 'at_install'), but then things become weird with post_install as tests should *not* run by default there """ if isinstance(test, unittest.TestCase): yield test return subtests = list(test) # custom test suite (no test cases) if not len(subtests): yield test return for item in itertools.chain.from_iterable( itertools.imap(unwrap_suite, subtests)): yield item # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ashleyjune/SM-G360T1_kernel
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
babble/babble
include/jython/Lib/test/test_jser2.py
1
2399
from test import test_support import unittest import java from org.python.util import PythonObjectInputStream def serialize(o, special=0): b = java.io.ByteArrayOutputStream() objs = java.io.ObjectOutputStream(b) objs.writeObject(o) if not special: OIS = java.io.ObjectInputStream else: OIS = PythonObjectInputStream objs = OIS(java.io.ByteArrayInputStream(b.toByteArray())) return objs.readObject() from jser2_classes import A, AJ, N, NL, NT class TestJavaSerialisation(unittest.TestCase): def test_list(self): l = [1,"a", 3.0] l1 = serialize(l) self.assertEqual(l, l1) def test_dict(self): d = {'a': 3.0} d1 = serialize(d) self.assertEqual(d, d1) def test_tuple(self): t = (1, 'a') t1 = serialize(t) self.assertEqual(t, t1) def test_oldstyle(self): a = A('x') a1 = serialize(a) self.assertEqual(a, a1) # wasn't working in 2.1 either #def test_oldstyle_cls(self): # A1 = serialize(A) # self.assert_(A is A1) def test_jsubcl(self): aj = AJ('x') aj1 = serialize(aj, special=1) self.assertEqual(aj, aj1) def test_singletons(self): for v in (None, Ellipsis): v1 = serialize(v) self.assert_(v is v1) v1 = serialize((v,))[0] self.assert_(v is v1) def test_NotImplemented(self): # XXX serialize(NotImplemented) is None because of __tojava__ v1 = serialize((NotImplemented,))[0] self.assert_(v1 is NotImplemented) def test_type(self): list1 = serialize(list) self.assert_(list1 is list) list1 = serialize((list,))[0] self.assert_(list1 is list) def test_user_type(self): N1 = serialize(N) self.assert_(N1 is N) N1 = serialize((N,))[0] self.assert_(N1 is N) def test_newstyle(self): n = N('x') n1 = serialize(n) self.assertEqual(n, n1) def test_newstyle_list(self): nl = NL('x',1,2,3) nl1 = serialize(nl) self.assertEqual(nl, nl1) def test_newstyle_tuple(self): nt = NT('x',1,2,3) nt1 = serialize(nt) self.assertEqual(nt, nt1) def test_main(): test_support.run_unittest(TestJavaSerialisation) if __name__ == "__main__": test_main()
apache-2.0
facebookPagesSearch/sample-code
sample-code/examples/python/android_simple.py
36
1787
import os from time import sleep import unittest from appium import webdriver # Returns abs path relative to this file and not cwd PATH = lambda p: os.path.abspath( os.path.join(os.path.dirname(__file__), p) ) class SimpleAndroidTests(unittest.TestCase): def setUp(self): desired_caps = {} desired_caps['platformName'] = 'Android' desired_caps['platformVersion'] = '4.2' desired_caps['deviceName'] = 'Android Emulator' desired_caps['app'] = PATH( '../../../sample-code/apps/ApiDemos/bin/ApiDemos-debug.apk' ) self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps) def tearDown(self): # end the session self.driver.quit() def test_find_elements(self): el = self.driver.find_element_by_accessibility_id('Graphics') el.click() el = self.driver.find_element_by_accessibility_id('Arcs') self.assertIsNotNone(el) self.driver.back() el = self.driver.find_element_by_accessibility_id("App") self.assertIsNotNone(el) els = self.driver.find_elements_by_android_uiautomator("new UiSelector().clickable(true)") self.assertGreaterEqual(12, len(els)) self.driver.find_element_by_android_uiautomator('text("API Demos")') def test_simple_actions(self): el = self.driver.find_element_by_accessibility_id('Graphics') el.click() el = self.driver.find_element_by_accessibility_id('Arcs') el.click() self.driver.find_element_by_android_uiautomator('new UiSelector().text("Graphics/Arcs")') if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(SimpleAndroidTests) unittest.TextTestRunner(verbosity=2).run(suite)
apache-2.0
Pluto-tv/chromium-crosswalk
tools/telemetry/third_party/gsutilz/gslib/__main__.py
12
26565
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Main module for Google Cloud Storage command line tool.""" from __future__ import absolute_import import ConfigParser import datetime import errno import getopt import logging import os import re import signal import socket import sys import textwrap import traceback # Load the gsutil version number and append it to boto.UserAgent so the value is # set before anything instantiates boto. This has to run after THIRD_PARTY_DIR # is modified (done in gsutil.py) but before any calls are made that would cause # boto.s3.Connection to be loaded - otherwise the Connection class would end up # with a static reference to the pre-modified version of the UserAgent field, # so boto requests would not include gsutil/version# in the UserAgent string. import boto import gslib # TODO: gsutil-beta: Cloud SDK scans for this string and performs # substitution; ensure this works with both apitools and boto. 
boto.UserAgent += ' gsutil/%s (%s)' % (gslib.VERSION, sys.platform) if os.environ.get('CLOUDSDK_WRAPPER') == '1': boto.UserAgent += ' Cloud SDK Command Line Tool' if os.environ.get('CLOUDSDK_VERSION'): boto.UserAgent += ' %s' % os.environ.get('CLOUDSDK_VERSION') # pylint: disable=g-bad-import-order # pylint: disable=g-import-not-at-top import httplib2 import oauth2client from gslib import wildcard_iterator from gslib.cloud_api import AccessDeniedException from gslib.cloud_api import ArgumentException from gslib.cloud_api import BadRequestException from gslib.cloud_api import ProjectIdException from gslib.cloud_api import ServiceException from gslib.command_runner import CommandRunner import gslib.exception from gslib.exception import CommandException import apitools.base.py.exceptions as apitools_exceptions from gslib.util import CreateLock from gslib.util import GetBotoConfigFileList from gslib.util import GetCertsFile from gslib.util import GetCleanupFiles from gslib.util import GsutilStreamHandler from gslib.util import ProxyInfoFromEnvironmentVar from gslib.sig_handling import GetCaughtSignals from gslib.sig_handling import InitializeSignalHandling from gslib.sig_handling import RegisterSignalHandler GSUTIL_CLIENT_ID = '909320924072.apps.googleusercontent.com' # Google OAuth2 clients always have a secret, even if the client is an installed # application/utility such as gsutil. Of course, in such cases the "secret" is # actually publicly known; security depends entirely on the secrecy of refresh # tokens, which effectively become bearer tokens. GSUTIL_CLIENT_NOTSOSECRET = 'p3RlpR10xMFh9ZXBS/ZNLYUu' if os.environ.get('CLOUDSDK_WRAPPER') == '1': # Cloud SDK installs have a separate client ID / secret. 
GSUTIL_CLIENT_ID = '32555940559.apps.googleusercontent.com' GSUTIL_CLIENT_NOTSOSECRET = 'ZmssLNjJy2998hD4CTg2ejr2' CONFIG_KEYS_TO_REDACT = ['proxy', 'proxy_port', 'proxy_user', 'proxy_pass'] # We don't use the oauth2 authentication plugin directly; importing it here # ensures that it's loaded and available by default when an operation requiring # authentication is performed. try: # pylint: disable=unused-import,g-import-not-at-top import gcs_oauth2_boto_plugin except ImportError: pass DEBUG_WARNING = """ ***************************** WARNING ***************************** *** You are running gsutil with debug output enabled. *** Be aware that debug output includes authentication credentials. *** Make sure to remove the value of the Authorization header for *** each HTTP request printed to the console prior to posting to *** a public medium such as a forum post or Stack Overflow. ***************************** WARNING ***************************** """.lstrip() HTTP_WARNING = """ ***************************** WARNING ***************************** *** You are running gsutil with the "https_validate_certificates" config *** variable set to False. This option should always be set to True in *** production environments to protect against man-in-the-middle attacks, *** and leaking of user data. 
***************************** WARNING ***************************** """.lstrip() debug = 0 test_exception_traces = False # pylint: disable=unused-argument def _CleanupSignalHandler(signal_num, cur_stack_frame): """Cleans up if process is killed with SIGINT, SIGQUIT or SIGTERM.""" _Cleanup() def _Cleanup(): for fname in GetCleanupFiles(): try: os.unlink(fname) except: # pylint: disable=bare-except pass def _OutputAndExit(message): """Outputs message and exists with code 1.""" from gslib.util import UTF8 # pylint: disable=g-import-not-at-top if debug >= 2 or test_exception_traces: stack_trace = traceback.format_exc() err = ('DEBUG: Exception stack trace:\n %s\n' % re.sub('\\n', '\n ', stack_trace)) else: err = '%s\n' % message try: sys.stderr.write(err.encode(UTF8)) except UnicodeDecodeError: # Can happen when outputting invalid Unicode filenames. sys.stderr.write(err) sys.exit(1) def _OutputUsageAndExit(command_runner): command_runner.RunNamedCommand('help') sys.exit(1) class GsutilFormatter(logging.Formatter): """A logging.Formatter that supports logging microseconds (%f).""" def formatTime(self, record, datefmt=None): if datefmt: return datetime.datetime.fromtimestamp(record.created).strftime(datefmt) # Use default implementation if datefmt is not specified. return super(GsutilFormatter, self).formatTime(record, datefmt=datefmt) def _ConfigureLogging(level=logging.INFO): """Similar to logging.basicConfig() except it always adds a handler.""" log_format = '%(levelname)s %(asctime)s %(filename)s] %(message)s' date_format = '%m%d %H:%M:%S.%f' formatter = GsutilFormatter(fmt=log_format, datefmt=date_format) handler = GsutilStreamHandler() handler.setFormatter(formatter) root_logger = logging.getLogger() root_logger.addHandler(handler) root_logger.setLevel(level) def main(): InitializeSignalHandling() # Any modules used in initializing multiprocessing variables must be # imported after importing gslib.__main__. 
# pylint: disable=redefined-outer-name,g-import-not-at-top import gslib.boto_translation import gslib.command import gslib.util from gslib.util import BOTO_IS_SECURE from gslib.util import CERTIFICATE_VALIDATION_ENABLED # pylint: disable=unused-variable from gcs_oauth2_boto_plugin import oauth2_client # pylint: enable=unused-variable from gslib.util import MultiprocessingIsAvailable if MultiprocessingIsAvailable()[0]: # These setup methods must be called, and, on Windows, they can only be # called from within an "if __name__ == '__main__':" block. gslib.util.InitializeMultiprocessingVariables() gslib.command.InitializeMultiprocessingVariables() gslib.boto_translation.InitializeMultiprocessingVariables() # This needs to be done after gslib.util.InitializeMultiprocessingVariables(), # since otherwise we can't call gslib.util.CreateLock. try: # pylint: disable=unused-import,g-import-not-at-top import gcs_oauth2_boto_plugin gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret( GSUTIL_CLIENT_ID, GSUTIL_CLIENT_NOTSOSECRET) gcs_oauth2_boto_plugin.oauth2_helper.SetLock(CreateLock()) except ImportError: pass global debug global test_exception_traces if not (2, 6) <= sys.version_info[:3] < (3,): raise gslib.exception.CommandException( 'gsutil requires python 2.6 or 2.7.') # In gsutil 4.0 and beyond, we don't use the boto library for the JSON # API. However, we still store gsutil configuration data in the .boto # config file for compatibility with previous versions and user convenience. # Many users have a .boto configuration file from previous versions, and it # is useful to have all of the configuration for gsutil stored in one place. command_runner = CommandRunner() if not BOTO_IS_SECURE: raise CommandException('\n'.join(textwrap.wrap( 'Your boto configuration has is_secure = False. 
Gsutil cannot be ' 'run this way, for security reasons.'))) headers = {} parallel_operations = False quiet = False version = False debug = 0 test_exception_traces = False # If user enters no commands just print the usage info. if len(sys.argv) == 1: sys.argv.append('help') # Change the default of the 'https_validate_certificates' boto option to # True (it is currently False in boto). if not boto.config.has_option('Boto', 'https_validate_certificates'): if not boto.config.has_section('Boto'): boto.config.add_section('Boto') boto.config.setbool('Boto', 'https_validate_certificates', True) gslib.util.certs_file_lock = CreateLock() for signal_num in GetCaughtSignals(): RegisterSignalHandler(signal_num, _CleanupSignalHandler) GetCertsFile() try: try: opts, args = getopt.getopt(sys.argv[1:], 'dDvo:h:mq', ['debug', 'detailedDebug', 'version', 'option', 'help', 'header', 'multithreaded', 'quiet', 'testexceptiontraces']) except getopt.GetoptError as e: _HandleCommandException(gslib.exception.CommandException(e.msg)) for o, a in opts: if o in ('-d', '--debug'): # Passing debug=2 causes boto to include httplib header output. debug = 3 elif o in ('-D', '--detailedDebug'): # We use debug level 3 to ask gsutil code to output more detailed # debug output. This is a bit of a hack since it overloads the same # flag that was originally implemented for boto use. And we use -DD # to ask for really detailed debugging (i.e., including HTTP payload). if debug == 3: debug = 4 else: debug = 3 elif o in ('-?', '--help'): _OutputUsageAndExit(command_runner) elif o in ('-h', '--header'): (hdr_name, _, hdr_val) = a.partition(':') if not hdr_name: _OutputUsageAndExit(command_runner) headers[hdr_name.lower()] = hdr_val elif o in ('-m', '--multithreaded'): parallel_operations = True elif o in ('-q', '--quiet'): quiet = True elif o in ('-v', '--version'): version = True elif o == '--testexceptiontraces': # Hidden flag for integration tests. 
test_exception_traces = True elif o in ('-o', '--option'): (opt_section_name, _, opt_value) = a.partition('=') if not opt_section_name: _OutputUsageAndExit(command_runner) (opt_section, _, opt_name) = opt_section_name.partition(':') if not opt_section or not opt_name: _OutputUsageAndExit(command_runner) if not boto.config.has_section(opt_section): boto.config.add_section(opt_section) boto.config.set(opt_section, opt_name, opt_value) httplib2.debuglevel = debug if debug > 1: sys.stderr.write(DEBUG_WARNING) if debug >= 2: _ConfigureLogging(level=logging.DEBUG) command_runner.RunNamedCommand('ver', ['-l']) config_items = [] try: config_items.extend(boto.config.items('Boto')) config_items.extend(boto.config.items('GSUtil')) except ConfigParser.NoSectionError: pass for i in xrange(len(config_items)): config_item_key = config_items[i][0] if config_item_key in CONFIG_KEYS_TO_REDACT: config_items[i] = (config_item_key, 'REDACTED') sys.stderr.write('Command being run: %s\n' % ' '.join(sys.argv)) sys.stderr.write('config_file_list: %s\n' % GetBotoConfigFileList()) sys.stderr.write('config: %s\n' % str(config_items)) elif quiet: _ConfigureLogging(level=logging.WARNING) else: _ConfigureLogging(level=logging.INFO) # oauth2client uses info logging in places that would better # correspond to gsutil's debug logging (e.g., when refreshing # access tokens). 
oauth2client.client.logger.setLevel(logging.WARNING) if not CERTIFICATE_VALIDATION_ENABLED: sys.stderr.write(HTTP_WARNING) if version: command_name = 'version' elif not args: command_name = 'help' else: command_name = args[0] _CheckAndWarnForProxyDifferences() if os.environ.get('_ARGCOMPLETE', '0') == '1': return _PerformTabCompletion(command_runner) return _RunNamedCommandAndHandleExceptions( command_runner, command_name, args=args[1:], headers=headers, debug_level=debug, parallel_operations=parallel_operations) finally: _Cleanup() def _CheckAndWarnForProxyDifferences(): # If there are both boto config and environment variable config present for # proxies, unset the environment variable and warn if it differs. boto_port = boto.config.getint('Boto', 'proxy_port', 0) if boto.config.get('Boto', 'proxy', None) or boto_port: for proxy_env_var in ['http_proxy', 'https_proxy', 'HTTPS_PROXY']: if proxy_env_var in os.environ and os.environ[proxy_env_var]: differing_values = [] proxy_info = ProxyInfoFromEnvironmentVar(proxy_env_var) if proxy_info.proxy_host != boto.config.get('Boto', 'proxy', None): differing_values.append( 'Boto proxy host: "%s" differs from %s proxy host: "%s"' % (boto.config.get('Boto', 'proxy', None), proxy_env_var, proxy_info.proxy_host)) if (proxy_info.proxy_user != boto.config.get('Boto', 'proxy_user', None)): differing_values.append( 'Boto proxy user: "%s" differs from %s proxy user: "%s"' % (boto.config.get('Boto', 'proxy_user', None), proxy_env_var, proxy_info.proxy_user)) if (proxy_info.proxy_pass != boto.config.get('Boto', 'proxy_pass', None)): differing_values.append( 'Boto proxy password differs from %s proxy password' % proxy_env_var) # Only compare ports if at least one is present, since the # boto logic for selecting default ports has not yet executed. 
if ((proxy_info.proxy_port or boto_port) and proxy_info.proxy_port != boto_port): differing_values.append( 'Boto proxy port: "%s" differs from %s proxy port: "%s"' % (boto_port, proxy_env_var, proxy_info.proxy_port)) if differing_values: sys.stderr.write('\n'.join(textwrap.wrap( 'WARNING: Proxy configuration is present in both the %s ' 'environment variable and boto configuration, but ' 'configuration differs. boto configuration proxy values will ' 'be used. Differences detected:' % proxy_env_var))) sys.stderr.write('\n%s\n' % '\n'.join(differing_values)) # Regardless of whether the proxy configuration values matched, # delete the environment variable so as not to confuse boto. del os.environ[proxy_env_var] def _HandleUnknownFailure(e): # Called if we fall through all known/handled exceptions. Allows us to # print a stacktrace if -D option used. if debug >= 2: stack_trace = traceback.format_exc() sys.stderr.write('DEBUG: Exception stack trace:\n %s\n' % re.sub('\\n', '\n ', stack_trace)) else: _OutputAndExit('Failure: %s.' % e) def _HandleCommandException(e): if e.informational: _OutputAndExit(e.reason) else: _OutputAndExit('CommandException: %s' % e.reason) # pylint: disable=unused-argument def _HandleControlC(signal_num, cur_stack_frame): """Called when user hits ^C. This function prints a brief message instead of the normal Python stack trace (unless -D option is used). Args: signal_num: Signal that was caught. cur_stack_frame: Unused. 
""" if debug >= 2: stack_trace = ''.join(traceback.format_list(traceback.extract_stack())) _OutputAndExit( 'DEBUG: Caught signal %d - Exception stack trace:\n' ' %s' % (signal_num, re.sub('\\n', '\n ', stack_trace))) else: _OutputAndExit('Caught signal %d - exiting' % signal_num) def _HandleSigQuit(signal_num, cur_stack_frame): """Called when user hits ^\\, so we can force breakpoint a running gsutil.""" import pdb # pylint: disable=g-import-not-at-top pdb.set_trace() def _ConstructAccountProblemHelp(reason): """Constructs a help string for an access control error. Args: reason: e.reason string from caught exception. Returns: Contructed help text. """ default_project_id = boto.config.get_value('GSUtil', 'default_project_id') # pylint: disable=line-too-long, g-inconsistent-quotes acct_help = ( "Your request resulted in an AccountProblem (403) error. Usually this " "happens if you attempt to create a bucket without first having " "enabled billing for the project you are using. Please ensure billing is " "enabled for your project by following the instructions at " "`Google Developers Console<https://developers.google.com/console/help/billing>`. ") if default_project_id: acct_help += ( "In the project overview, ensure that the Project Number listed for " "your project matches the project ID (%s) from your boto config file. " % default_project_id) acct_help += ( "If the above doesn't resolve your AccountProblem, please send mail to " "gs-team@google.com requesting assistance, noting the exact command you " "ran, the fact that you received a 403 AccountProblem error, and your " "project ID. Please do not post your project ID on StackOverflow. " "Note: It's possible to use Google Cloud Storage without enabling " "billing if you're only listing or reading objects for which you're " "authorized, or if you're uploading objects to a bucket billed to a " "project that has billing enabled. 
But if you're attempting to create " "buckets or upload objects to a bucket owned by your own project, you " "must first enable billing for that project.") return acct_help def _CheckAndHandleCredentialException(e, args): # Provide detail to users who have no boto config file (who might previously # have been using gsutil only for accessing publicly readable buckets and # objects). # pylint: disable=g-import-not-at-top from gslib.util import HasConfiguredCredentials if (not HasConfiguredCredentials() and not boto.config.get_value('Tests', 'bypass_anonymous_access_warning', False)): # The check above allows tests to assert that we get a particular, # expected failure, rather than always encountering this error message # when there are no configured credentials. This allows tests to # simulate a second user without permissions, without actually requiring # two separate configured users. if os.environ.get('CLOUDSDK_WRAPPER') == '1': _OutputAndExit('\n'.join(textwrap.wrap( 'You are attempting to access protected data with no configured ' 'credentials. Please visit ' 'https://cloud.google.com/console#/project and sign up for an ' 'account, and then run the "gcloud auth login" command to ' 'configure gsutil to use these credentials.'))) else: _OutputAndExit('\n'.join(textwrap.wrap( 'You are attempting to access protected data with no configured ' 'credentials. Please visit ' 'https://cloud.google.com/console#/project and sign up for an ' 'account, and then run the "gsutil config" command to configure ' 'gsutil to use these credentials.'))) elif (e.reason and (e.reason == 'AccountProblem' or e.reason == 'Account disabled.' 
or 'account for the specified project has been disabled' in e.reason) and ','.join(args).find('gs://') != -1): _OutputAndExit('\n'.join(textwrap.wrap( _ConstructAccountProblemHelp(e.reason)))) def _RunNamedCommandAndHandleExceptions(command_runner, command_name, args=None, headers=None, debug_level=0, parallel_operations=False): """Runs the command with the given command runner and arguments.""" # pylint: disable=g-import-not-at-top from gslib.util import GetConfigFilePath from gslib.util import IS_WINDOWS from gslib.util import IsRunningInteractively try: # Catch ^C so we can print a brief message instead of the normal Python # stack trace. Register as a final signal handler because this handler kills # the main gsutil process (so it must run last). RegisterSignalHandler(signal.SIGINT, _HandleControlC, is_final_handler=True) # Catch ^\ so we can force a breakpoint in a running gsutil. if not IS_WINDOWS: RegisterSignalHandler(signal.SIGQUIT, _HandleSigQuit) return command_runner.RunNamedCommand(command_name, args, headers, debug_level, parallel_operations) except AttributeError as e: if str(e).find('secret_access_key') != -1: _OutputAndExit('Missing credentials for the given URI(s). Does your ' 'boto config file contain all needed credentials?') else: _OutputAndExit(str(e)) except gslib.exception.CommandException as e: _HandleCommandException(e) except getopt.GetoptError as e: _HandleCommandException(gslib.exception.CommandException(e.msg)) except boto.exception.InvalidUriError as e: _OutputAndExit('InvalidUriError: %s.' % e.message) except gslib.exception.InvalidUrlError as e: _OutputAndExit('InvalidUrlError: %s.' % e.message) except boto.auth_handler.NotReadyToAuthenticate: _OutputAndExit('NotReadyToAuthenticate') except OSError as e: _OutputAndExit('OSError: %s.' 
% e.strerror) except IOError as e: if (e.errno == errno.EPIPE or (IS_WINDOWS and e.errno == errno.EINVAL) and not IsRunningInteractively()): # If we get a pipe error, this just means that the pipe to stdout or # stderr is broken. This can happen if the user pipes gsutil to a command # that doesn't use the entire output stream. Instead of raising an error, # just swallow it up and exit cleanly. sys.exit(0) else: raise except wildcard_iterator.WildcardException as e: _OutputAndExit(e.reason) except ProjectIdException as e: _OutputAndExit( 'You are attempting to perform an operation that requires a ' 'project id, with none configured. Please re-run ' 'gsutil config and make sure to follow the instructions for ' 'finding and entering your default project id.') except BadRequestException as e: if e.reason == 'MissingSecurityHeader': _CheckAndHandleCredentialException(e, args) _OutputAndExit(e) except AccessDeniedException as e: _CheckAndHandleCredentialException(e, args) _OutputAndExit(e) except ArgumentException as e: _OutputAndExit(e) except ServiceException as e: _OutputAndExit(e) except apitools_exceptions.HttpError as e: # These should usually be retried by the underlying implementation or # wrapped by CloudApi ServiceExceptions, but if we do get them, # print something useful. _OutputAndExit('HttpError: %s, %s' % (getattr(e.response, 'status', ''), e.content or '')) except socket.error as e: if e.args[0] == errno.EPIPE: # Retrying with a smaller file (per suggestion below) works because # the library code send loop (in boto/s3/key.py) can get through the # entire file and then request the HTTP response before the socket # gets closed and the response lost. _OutputAndExit( 'Got a "Broken pipe" error. This can happen to clients using Python ' '2.x, when the server sends an error response and then closes the ' 'socket (see http://bugs.python.org/issue5542). 
If you are trying to ' 'upload a large object you might retry with a small (say 200k) ' 'object, and see if you get a more specific error code.' ) else: _HandleUnknownFailure(e) except Exception as e: # Check for two types of errors related to service accounts. These errors # appear to be the same except for their messages, but they are caused by # different problems and both have unhelpful error messages. Moreover, # the error type belongs to PyOpenSSL, which is not necessarily installed. if 'mac verify failure' in str(e): _OutputAndExit( 'Encountered an error while refreshing access token. ' 'If you are using a service account,\nplease verify that the ' 'gs_service_key_file_password field in your config file,' '\n%s, is correct.' % GetConfigFilePath()) elif 'asn1 encoding routines' in str(e): _OutputAndExit( 'Encountered an error while refreshing access token. ' 'If you are using a service account,\nplease verify that the ' 'gs_service_key_file field in your config file,\n%s, is correct.' % GetConfigFilePath()) _HandleUnknownFailure(e) def _PerformTabCompletion(command_runner): """Performs gsutil-specific tab completion for the shell.""" # argparse and argcomplete are bundled with the Google Cloud SDK. # When gsutil is invoked from the Google Cloud SDK, both should be available. try: import argcomplete import argparse except ImportError as e: _OutputAndExit('A library required for performing tab completion was' ' not found.\nCause: %s' % e) parser = argparse.ArgumentParser(add_help=False) subparsers = parser.add_subparsers() command_runner.ConfigureCommandArgumentParsers(subparsers) argcomplete.autocomplete(parser, exit_method=sys.exit) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
chenjun0210/tensorflow
tensorflow/contrib/layers/python/ops/sparse_ops_test.py
18
6055
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.contrib.layers.python.ops.sparse_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.layers.python.ops import sparse_ops from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class SparseOpsTest(test.TestCase): def test_dense_to_sparse_tensor_1d(self): with self.test_session() as sess: st = sparse_ops.dense_to_sparse_tensor([1, 0, 2, 0]) result = sess.run(st) self.assertEqual(result.indices.dtype, np.int64) self.assertEqual(result.values.dtype, np.int32) self.assertEqual(result.dense_shape.dtype, np.int64) self.assertAllEqual([[0], [2]], result.indices) self.assertAllEqual([1, 2], result.values) self.assertAllEqual([4], result.dense_shape) def test_dense_to_sparse_tensor_1d_float(self): with self.test_session() as sess: st = sparse_ops.dense_to_sparse_tensor([1.5, 0.0, 2.3, 0.0]) result = sess.run(st) self.assertEqual(result.indices.dtype, np.int64) self.assertEqual(result.values.dtype, np.float32) self.assertEqual(result.dense_shape.dtype, np.int64) self.assertAllEqual([[0], [2]], result.indices) self.assertAllClose([1.5, 2.3], result.values) self.assertAllEqual([4], 
result.dense_shape) def test_dense_to_sparse_tensor_1d_bool(self): with self.test_session() as sess: st = sparse_ops.dense_to_sparse_tensor([True, False, True, False]) result = sess.run(st) self.assertEqual(result.indices.dtype, np.int64) self.assertEqual(result.values.dtype, np.bool) self.assertEqual(result.dense_shape.dtype, np.int64) self.assertAllEqual([[0], [2]], result.indices) self.assertAllEqual([True, True], result.values) self.assertAllEqual([4], result.dense_shape) def test_dense_to_sparse_tensor_1d_str(self): with self.test_session() as sess: st = sparse_ops.dense_to_sparse_tensor([b'qwe', b'', b'ewq', b'']) result = sess.run(st) self.assertEqual(result.indices.dtype, np.int64) self.assertEqual(result.values.dtype, np.object) self.assertEqual(result.dense_shape.dtype, np.int64) self.assertAllEqual([[0], [2]], result.indices) self.assertAllEqual([b'qwe', b'ewq'], result.values) self.assertAllEqual([4], result.dense_shape) def test_dense_to_sparse_tensor_1d_str_special_ignore(self): with self.test_session() as sess: st = sparse_ops.dense_to_sparse_tensor( [b'qwe', b'', b'ewq', b''], ignore_value=b'qwe') result = sess.run(st) self.assertEqual(result.indices.dtype, np.int64) self.assertEqual(result.values.dtype, np.object) self.assertEqual(result.dense_shape.dtype, np.int64) self.assertAllEqual([[1], [2], [3]], result.indices) self.assertAllEqual([b'', b'ewq', b''], result.values) self.assertAllEqual([4], result.dense_shape) def test_dense_to_sparse_tensor_2d(self): with self.test_session() as sess: st = sparse_ops.dense_to_sparse_tensor([[1, 2, 0, 0], [3, 4, 5, 0]]) result = sess.run(st) self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]], result.indices) self.assertAllEqual([1, 2, 3, 4, 5], result.values) self.assertAllEqual([2, 4], result.dense_shape) def test_dense_to_sparse_tensor_3d(self): with self.test_session() as sess: st = sparse_ops.dense_to_sparse_tensor([[[1, 2, 0, 0], [3, 4, 5, 0]], [[7, 8, 0, 0], [9, 0, 0, 0]]]) result = 
sess.run(st) self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices) self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values) self.assertAllEqual([2, 2, 4], result.dense_shape) def test_dense_to_sparse_tensor_1d_no_shape(self): with self.test_session() as sess: tensor = array_ops.placeholder(shape=[None], dtype=dtypes.int32) st = sparse_ops.dense_to_sparse_tensor(tensor) result = sess.run(st, feed_dict={tensor: [0, 100, 0, 3]}) self.assertAllEqual([[1], [3]], result.indices) self.assertAllEqual([100, 3], result.values) self.assertAllEqual([4], result.dense_shape) def test_dense_to_sparse_tensor_3d_no_shape(self): with self.test_session() as sess: tensor = array_ops.placeholder( shape=[None, None, None], dtype=dtypes.int32) st = sparse_ops.dense_to_sparse_tensor(tensor) result = sess.run(st, feed_dict={ tensor: [[[1, 2, 0, 0], [3, 4, 5, 0]], [[7, 8, 0, 0], [9, 0, 0, 0]]] }) self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices) self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values) self.assertAllEqual([2, 2, 4], result.dense_shape) def test_convert_to_sparse_undef_shape(self): with self.test_session(): with self.assertRaises(ValueError): tensor = array_ops.placeholder(dtype=dtypes.int32) sparse_ops.dense_to_sparse_tensor(tensor) if __name__ == '__main__': test.main()
apache-2.0
fracturica/shardlib
shardlib/comp_analysis/SIMCompAnalysis.py
1
23592
import dataProcessing as dp import plotFuncs as pf import numpy as np from matplotlib.ticker import MultipleLocator, FormatStrFormatter from mpl_toolkits.axes_grid1 import host_subplot import mpl_toolkits.axisartist as AA from matplotlib.path import Path from mpl_toolkits.mplot3d import Axes3D import matplotlib as mpl from compAnalysisBase import CompAnalysisBase class SIMCompAnalysis(CompAnalysisBase): def __init__(self, leavesQueue, criteria, sifs): self.queue = leavesQueue self.sifs = sifs self.crit = criteria def printQueueItems(self, items): self.queue.printTitle() for i in sorted(items): self.queue.printQueueItem(i) def getItemNodeDict(self, items, queue): qdict = queue.getQueueDict() return dict([(i, qdict[i]) for i in items]) def calcAlphaVal(self, sif, item): vals = len(self.dataDicts[0][0][sif][item]) if vals > 1000: return 0.1 else: return 1 class BoxCompPlot(SIMCompAnalysis): def createCompBoxPlot(self, items, errType, fig): self.items = items self.errType = errType self.createDataDictAndEstBoxPlot() self.createDataStrBoxPlot() self.createFigure(fig) def createDataStrBoxPlot(self): dd = self.getItemNodeDict(self.items, self.queue) optKey = self.getLeavesOptKey() data = [dd, optKey, 'Number in Queue', ''] self.dataStr = [data] def getLeavesOptKey(self): return sorted(self.est.items(), key=lambda x: abs(x[1]))[0][0] def createDataDictAndEstBoxPlot(self): dataDict = {s: {} for s in self.sifs} est = {i: {} for i in self.items} dd = self.getItemNodeDict(self.items, self.queue) for i in self.items: node = dd[i] errs, est[i] = self.getNodeErrsEst(node) for s in self.sifs: dataDict[s][i] = errs[s] self.est = {i: est[i][self.crit[1]] for i in self.items} self.dataDicts = [dataDict] def getNodeErrsEst(self, node): adn = dp.AnalysisNodeData(node, self.sifs) adn.performOperations() est = adn.getEstimates()[self.crit[0]] errs = adn.getErrors()[self.errType] return errs, est class HistCompPlot(SIMCompAnalysis): def createCompHistPlot(self, items, errType, xlim, fig): 
self.fig = fig self.items = items self.errType = errType self.xlim = xlim self.createDataStr() self.createDataDict() self.createFigure() def createDataStr(self): dd = self.getItemNodeDict(self.items.keys(), self.queue) xlabel = 'errors "{0}"'.format(self.errType) data = [dd, None, xlabel, 'hist'] self.dataStr = [data] def createDataDict(self): data = {s: {} for s in self.sifs} for i in self.items.keys(): node = self.dataStr[0][0][i] errs = self.getNodeErrors(node) for s in self.sifs: data[s][i] = errs[s] self.dataDicts = [data] def getNodeErrors(self, node): adn = dp.AnalysisNodeData(node, self.sifs) adn.performOperations() errs = adn.getErrors()[self.errType] return errs def setAxesXlim(self): for ax in self.axes: ax.set_xlim(self.xlim) def setAxesYlim(self): ymin, ymax = 10e16, 10e-16 for ax in self.axes: y1, y2 = ax.get_ylim() ymin = y1 if y1 < ymin else ymin ymax = y2 if y2 > ymax else ymax for ax in self.axes: ax.set_ylim((ymin, ymax)) def setLegend(self, handles): text = 'Node: ' labels = [text + str(i) for i in sorted(handles.keys())] handles = [handles[i] for i in sorted(handles.keys())] self.axes[0].legend(handles, labels, bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0) def createFigure(self): self.axes = [] self.createFigureAxes() handles = {} for k in range(len(self.axes)): s = self.sifs[k] for i in self.items.keys(): n, b, p = self.axes[k].hist( self.dataDicts[0][s][i], self.items[i], normed=True, alpha=0.5) handles[i] = p[0] self.setAxesXlim() self.setAxesYlim() self.setLegend(handles) self.setXlabels() self.printQueueItems(self.items.keys()) class CorrCompPlot(SIMCompAnalysis): def createCompCorrPlot(self, items, quantityType, ylim, fig): self.fig = fig self.items = items self.qt = quantityType self.ylim = ylim self.createDataStr() self.createDataDict() self.createFigure() def createDataStr(self): dd = self.getItemNodeDict(self.items, self.queue) data = [dd, None, 'analytical values', 'analysis vs analytical'] self.dataStr = [data] def 
createDataDict(self): dataX = {s: {} for s in self.sifs} dataY = {s: {} for s in self.sifs} for i in self.items: node = self.dataStr[0][0][i] anSol, res = self.getNodeParams(node) for s in self.sifs: dataX[s][i] = anSol[s] dataY[s][i] = res[s] self.dataDicts = [[dataX, dataY]] def getNodeParams(self, node): adn = dp.AnalysisNodeData(node, self.sifs) adn.performOperations() anSol = adn.getAnSol() res = adn.getDataByType(self.qt) return anSol, res def getReferenceXYVals(self): minV = {s: 10e16 for s in self.sifs} maxV = {s: -10e16 for s in self.sifs} for s in self.sifs: for i in self.items: mn = min(self.dataDicts[0][0][s][i]) mx = max(self.dataDicts[0][0][s][i]) minV[s] = mn if mn < minV[s] else minV[s] maxV[s] = mx if mx > maxV[s] else maxV[s] if self.qt == 'results': refX = {s: [minV[s], maxV[s]] for s in self.sifs} return refX, refX elif self.qt in ['difference', 'normedDiff']: refX = {s: [max(0, minV[s]), maxV[s]] for s in self.sifs} refY = {s: [0, 0] for s in self.sifs} return refX, refY else: raise NotImplementedError def getXYVals(self, sif, item): if self.qt == 'results': X = self.dataDicts[0][0][sif][item] Y = self.dataDicts[0][1][sif][item] elif self.qt in ['difference', 'normedDiff']: X = np.abs(self.dataDicts[0][0][sif][item]) Y = self.dataDicts[0][1][sif][item] else: raise NotImplementedError return X, Y def createPlot(self): self.handles = {} refX, refY = self.getReferenceXYVals() for k in range(len(self.axes)): s = self.sifs[k] for i in self.items: alpha = self.calcAlphaVal(s, i) X, Y = self.getXYVals(s, i) p, = self.axes[k].plot(X, Y, '.', alpha=alpha) self.handles[i] = p r, = self.axes[k].plot(refX[s], refY[s], 'k', lw=1.5) self.handles['reference'] = r def setXLim(self): refX, refY = self.getReferenceXYVals() for k in range(len(self.axes)): s = self.sifs[k] self.axes[k].set_xlim(refX[s]) def setLegend(self): text = 'Node: ' labels = [text + str(i) for i in self.items] handles = [self.handles[i] for i in self.items] if 'reference' in 
self.handles.keys(): handles.append(self.handles['reference']) labels.append('ref line') self.axes[0].legend(handles, labels, bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0) def setYLim(self): if isinstance(self.ylim, (list, tuple)): for ax in self.axes: ax.set_ylim(self.ylim) def createFigure(self): self.axes = [] self.createFigureAxes() self.createPlot() self.setXLim() self.setLegend() self.printQueueItems(self.items) self.setYLim() class RangeCompPlot(SIMCompAnalysis): def createCompRangePlot(self, items, opts, fig): self.fig = fig self.items = items self.opts = opts self.createDataStr() self.createDataDict() self.createFigure() def createDataStr(self): self.dataStr = [] qdict = self.queue.getQueueDict() for k in sorted(self.items.keys()): optSim = self.getOptSim(qdict[k]) data = [{k: qdict[k]}, optSim, 'angles', self.getSubplotTitle(qdict[k])] self.dataStr.append(data) def getOptSim(self, node): if self.opts['optSim']: sims = node.getSuccessfulMembers() optSim = pf.getSimIdsWithLowestErrorPerDH( sims, self.crit[0], self.crit[1]).values()[0][0] return optSim else: return None def createDataDict(self): self.dataDicts = [] for item in self.dataStr: node = item[0].values()[0] self.dataDicts.append(self.getNodeParams(node)) def getNodeParams(self, node): adn = dp.AnalysisNodeData(node, self.sifs) adn.performOperations() angles = adn.getAngles() results = adn.getResults() ansol = adn.getAnSol() errors = adn.getErrors()[self.opts['errors']] return angles, results, ansol, errors def createSlices(self): self.slices = [] i = 0 for k in sorted(self.items.keys()): numInt = self.items[k] angles = self.dataDicts[i][0] sl = self.createSliceIndices(angles, numInt) self.slices.append(sl) i += 1 def createSliceIndices(self, vals, numInts): intLen = (max(vals) - min(vals)) / float(numInts) indices = [[] for i in range(numInts)] for x in vals: i = int(x / intLen) if i < numInts - 1: indices[i].append(x) else: indices[-1].append(x) if [] in indices: raise ValueError('Try reducing 
the number of intervals.') sliceInd = [[] for i in range(numInts)] for i in range(numInts): minVal = indices[i][0] maxVal = indices[i][-1] ind0 = np.where(vals == minVal)[0][0] ind1 = np.where(vals == maxVal)[-1][-1] + 1 sliceInd[i].append(ind0) sliceInd[i].append(ind1) sliceInd[-1][1] += 1 return sliceInd def createFigure(self): self.axes = [] self.createFigureAxes() if self.opts['range']: self.createSlices() self.plotRangeArea() if self.opts['dataPoints']: self.createDataPointsPlot() if self.opts['analytical']: self.createAnSolPlot() if self.opts['optSim']: self.createOptSimPlot() self.setXLim() self.createLegend() self.setSubplotTitles() self.setYlimits() def createLegend(self): handles = [] labels = [] h, l = self.axes[0].get_legend_handles_labels() ind = len(self.dataStr) - 1 self.axes[ind].legend(h, l, bbox_to_anchor=(1, 1.02), loc=2) def setXLim(self): for n in range(len(self.dataStr)): i = self.getItemKey(n) for sif in self.sifs: ax = self.getAxes(i, sif) angles = self.dataDicts[n][0] ax.set_xlim((min(angles), max(angles))) def createOptSimPlot(self): for n in range(len(self.dataDicts)): i = self.getItemKey(n) ad = dp.AnalysisData(self.dataStr[n][1]) ad.calcAnSol() ad.calculateStats() angles = ad.getAngles() for sif in self.sifs: ax = self.getAxes(i, sif) res = ad.getResults()[sif] ax.plot(angles, res, 'lime', lw=1, label='optSim') def createDataPointsPlot(self): for n in range(len(self.dataStr)): i = self.getItemKey(n) for sif in self.sifs: angles = self.dataDicts[n][0] ax = self.getAxes(i, sif) for dt in self.opts['data']: dInd, color = self.getDataIndAndColor(dt) data = self.dataDicts[n][dInd][sif] alpha = self.calcAlphaValRP(n) ax.plot(angles, data, linestyle='-', marker='.', color=color, alpha=alpha, label=dt) def calcAlphaValRP(self, n): vals = len(self.dataDicts[n][0]) if vals > 1000: return 0.05 else: return 0.3 def createAnSolPlot(self): for n in range(len(self.items.keys())): i = self.getItemKey(n) for sif in self.sifs: ax = self.getAxes(i, sif) 
angles = self.dataDicts[n][0] anSol = self.dataDicts[n][2][sif] ax.plot(angles, anSol, 'k', lw=2, label='analytical') def getAxes(self, item, sif): itemInd = sorted(self.items.keys()).index(item) itemLen = len(self.items) ax = self.axes[itemLen * self.sifs.index(sif) + itemInd] return ax def getItemKey(self, n): return sorted(self.items.keys())[n] def plotRangeArea(self): for n in range(len(self.items)): i = self.getItemKey(n) for sif in self.sifs: axes = self.getAxes(i, sif) self.plotRangeAreaPerAxes(axes, n, sif) def getDataIndAndColor(self, dataType): dataInds = {'results': 1, 'errors': 3} colors = {'results': 'b', 'errors': 'r'} return dataInds[dataType], colors[dataType] def createVerts(self, slices, angles, values, func): x, y, verts = [], [], [] valsl = [values[s[0] - 1 if s[0] > 0 else 0:s[1]] for s in slices] angsl = [angles[s[0] - 1 if s[0] > 0 else 0:s[1]] for s in slices] for a in angsl: x.append(a[0]) x.append(a[-1]) for v in valsl: y.append(func(v)) y.append(func(v)) verts = [[xi, yi] for xi, yi in zip(x, y)] return verts def createVerts2(self, slices, angles, values, func): x, y, verts = [], [], [] valsl = [values[s[0]:s[1]] for s in slices] angsl = [angles[s[0]:s[1]] for s in slices] for an, va in zip(angsl, valsl): y.append(func(va)) print va, y print np.where(va == y[-1]) ind = np.where(va == y[-1])[0][0] x.append(an[ind]) x.append(angles[-1]) x.insert(0, angles[0]) yavg = 0.5 * (y[0] + y[-1]) y.append(yavg) y.insert(0, yavg) verts = [[xi, yi] for xi, yi in zip(x, y)] return verts def plotRangeAreaPerAxes(self, axes, itemInd, sif): vertMethods = {1: self.createVerts, 2: self.createVerts2} vertFunc = vertMethods[self.opts['rangeType']] slices = self.slices[itemInd] angles = self.dataDicts[itemInd][0] for dt in self.opts['data']: dInd, color = self.getDataIndAndColor(dt) values = self.dataDicts[itemInd][dInd][sif] verts1 = vertFunc(slices, angles, values, min) verts2 = vertFunc(slices, angles, values, max)[::-1] verts = verts1 + verts2 + 
[verts2[-1]] codes = self.createClosedPathCodes(verts) p = Path(verts, codes) patch = mpl.patches.PathPatch( p, facecolor=color, edgecolor='none', alpha=0.2, label=dt + ' range') axes.add_patch(patch) patch = mpl.patches.PathPatch(p, edgecolor=color, fill=False, lw=0.75, alpha=0.6) axes.add_patch(patch) def createClosedPathCodes(self, verts): codes = [Path.MOVETO] for i in range(len(verts) - 2): codes.append(Path.LINETO) codes.append(Path.CLOSEPOLY) return codes class BoundsCompPlot(SIMCompAnalysis): def createBoundsPlot(self, items, targets, fig, tol=0.1, iterLim=100): self.items = items self.targets = targets self.fig = fig self.iterLim = iterLim self.tol = tol self.createDataStr() self.createDataDicts() self.printStats() self.createFigure() def createDataStr(self): self.dataStr = [] qdict = self.queue.getQueueDict() for i in self.items: dd = [{i: qdict[i]}, None, 'angles', self.getSubplotTitle(qdict[i])] self.dataStr.append(dd) def createDataDicts(self): self.dataDicts = [] for n in range(len(self.items)): i = self.items[n] log = {s: {t: {'sigma': [], 'pip': []} for t in self.targets.keys()} for s in self.sifs} node = self.dataStr[n][0][i] adn = dp.AnalysisNodeData(node, self.sifs) adn.performOperations() sigmaUp = 2 * adn.getAnSolParams()['sigma'] sigmaLow = 0 for s in self.sifs: for t in self.targets.keys(): log[s][t] = self.findSigmaBound( adn, sigmaUp, sigmaLow, s, self.targets[t], log[s][t]) self.dataDicts.append([adn, log]) def printStats(self): for n in range(len(self.dataStr)): i = self.items[n] print self.dataStr[n][3] log = self.dataDicts[n][1] for s in self.sifs: sigmas, bounds, its = [], [], [] for t in log[s].keys(): u = log[s][t] sigmas.append(u['sigma'][-1]) bounds.append(u['pip'][-1]) its.append(len(u['sigma'])) info = '{0}sigma=[{1:.4}, {2:.4}] | bounds=[{3:.4}%, {4:.4}%] | iterations=[{5}, {6}]'.format( ' {0} '.format(s), sigmas[0], sigmas[1], bounds[0], bounds[1], its[0], its[1]) print info def createFigure(self): self.axes = [] 
self.createFigureAxes() self.createPlot() self.setXLimits() self.setYlimits() self.setSubplotTitles() def setXLimits(self): for n in range(len(self.dataStr)): i = self.items[n] adn = self.dataDicts[n][0] a = adn.getAngles() lims = (min(a), max(a)) for s in self.sifs: ax = self.getAxes(i, s) ax.set_xlim(lims) def getAxes(self, item, sif): itemLen = len(self.items) itemInd = self.items.index(item) ax = self.axes[itemLen * self.sifs.index(sif) + itemInd] return ax def getAlphaVal(self, item): n = self.items.index(item) adn = self.dataDicts[n][0] if len(adn.getAngles()) > 1000: return 0.1 else: return 1 def createPlot(self): for n in range(len(self.dataStr)): i = self.items[n] adn = self.dataDicts[n][0] logs = self.dataDicts[n][1] alpha = self.getAlphaVal(i) for s in self.sifs: ax = self.getAxes(i, s) sigmaUpper = logs[s]['upper']['sigma'][-1] sigmaLower = logs[s]['lower']['sigma'][-1] ins, outs = self.getInOutPoints(adn, sigmaLower, sigmaUpper, s) ax.plot(ins[0], ins[1], 'b.', label='inside bounds', alpha=alpha) ax.plot(outs[0], outs[1], 'r.', label='outside bounds', alpha=alpha) angles = adn.getAngles() anSol = adn.getAnSol()[s] ax.plot(angles, anSol, 'k', lw=1.5, label='analytical') lowerBound = adn.calcSIFsForSigmaAndSIF( sigmaLower, s) upperBound = adn.calcSIFsForSigmaAndSIF( sigmaUpper, s) ax.plot(angles, upperBound, 'lime', lw=1.5, label='bounds') ax.plot(angles, lowerBound, 'lime', lw=1.5) def findSigmaBound(self, adn, sigmaUp, sigmaLow, sif, target, log): sigma = 0.5 * (sigmaUp + sigmaLow) pip = self.getPercentPointsInPoly(adn, sigma, sif) log['pip'].append(pip) log['sigma'].append(sigma) if ((pip >= target - self.tol and pip <= target + self.tol) or (len(log['sigma']) == self.iterLim)): return log elif pip < target - self.tol: sigmaLow = sigma return self.findSigmaBound(adn, sigmaUp, sigmaLow, sif, target, log) elif pip > target + self.tol: sigmaUp = sigma return self.findSigmaBound(adn, sigmaUp, sigmaLow, sif, target, log) else: raise ValueError('unexpected 
condition reached') def getPercentPointsInPoly(self, adn, sigma, sif): allnum, numin, numout = self.countPointInOutOfContour( adn, sigma, sif) assert abs(numin + numout - allnum) < 10e-8 return float(numin) / float(allnum) * 100 def countPointInOutOfContour(self, adn, sigma, sif): tfl = self.getInOutOfContour(adn, sigma, sif) numin = np.sum(tfl) allnum = len(tfl) numout = allnum - numin return allnum, numin, numout def getInOutOfContour(self, adn, sigma, sif): angles = adn.getAngles() results = abs(adn.getResults()[sif]) points = [[xi, yi] for xi, yi in zip(angles, results)] yVals = abs(np.array(adn.calcSIFsForSigmaAndSIF(sigma, sif))) return self.getInOutPointsArray(angles, yVals, points) def getInOutPointsArray(self, angles, yVals, points): path = Path(self.createVertsForPolyPath(angles, yVals)) return path.contains_points(points, radius=0) def getInOutPoints(self, adn, sigmaLow, sigmaUp, sif): inoutLow = self.getInOutOfContour(adn, sigmaLow, sif) inoutUp = self.getInOutOfContour(adn, sigmaUp, sif) angles = adn.getAngles() res = adn.getResults()[sif] inAngles, inVals = [], [] outAngles, outVals = [], [] for i in range(len(inoutUp)): if inoutLow[i] or not inoutUp[i]: outAngles.append(angles[i]) outVals.append(res[i]) else: inAngles.append(angles[i]) inVals.append(res[i]) return [[inAngles, inVals], [outAngles, outVals]] def createVertsForPolyPath(self, x, y): verts = [[xi, yi] for xi, yi in zip(x, y)] verts.insert(0, [verts[0][0], -10e16]) verts.append([verts[-1][0], -10e16]) return verts
mit
DTOcean/dtocean-core
dtocean_core/tools/external.py
1
3947
# -*- coding: utf-8 -*- # Copyright (C) 2016-2018 Mathew Topper # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import subprocess from . import Tool from ..utils.process import script class WECSimulatorTool(Tool): """Start dtocean-wec""" @classmethod def get_name(cls): return "WEC Simulator" @classmethod def declare_inputs(cls): '''A class method to declare all the variables required as inputs by this interface. Returns: list: List of inputs identifiers Example: The returned value can be None or a list of identifier strings which appear in the data descriptions. For example:: inputs = ["My:first:variable", "My:second:variable", ] ''' return None @classmethod def declare_outputs(cls): '''A class method to declare all the output variables provided by this interface. Returns: list: List of output identifiers Example: The returned value can be None or a list of identifier strings which appear in the data descriptions. For example:: outputs = ["My:first:variable", "My:third:variable", ] ''' return None @classmethod def declare_optional(cls): '''A class method to declare all the variables which should be flagged as optional. Returns: list: List of optional variable identifiers Note: Currently only inputs marked as optional have any logical effect. However, this may change in future releases hence the general approach. 
Example: The returned value can be None or a list of identifier strings which appear in the declare_inputs output. For example:: optional = ["My:first:variable", ] ''' return None @classmethod def declare_id_map(cls): '''Declare the mapping for variable identifiers in the data description to local names for use in the interface. This helps isolate changes in the data description or interface from effecting the other. Returns: dict: Mapping of local to data description variable identifiers Example: The returned value must be a dictionary containing all the inputs and outputs from the data description and a local alias string. For example:: id_map = {"var1": "My:first:variable", "var2": "My:second:variable", "var3": "My:third:variable" } ''' id_map = {} return id_map def configure(self, kwargs=None): """Does nothing in this case""" return def connect(self, **kwargs): script_path = script("dtocean-wec.exe") if script_path is None: return si = subprocess.STARTUPINFO() si.dwFlags |= subprocess.STARTF_USESHOWWINDOW subprocess.call(script_path, startupinfo=si) return
gpl-3.0
amenonsen/ansible
lib/ansible/modules/network/nxos/nxos_overlay_global.py
84
5899
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_overlay_global extends_documentation_fragment: nxos version_added: "2.2" short_description: Configures anycast gateway MAC of the switch. description: - Configures anycast gateway MAC of the switch. author: Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - Default restores params default value - Supported MAC address format are "E.E.E", "EE-EE-EE-EE-EE-EE", "EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE" options: anycast_gateway_mac: description: - Anycast gateway mac of the switch. 
required: true ''' EXAMPLES = ''' - nxos_overlay_global: anycast_gateway_mac: "b.b.b" ''' RETURN = ''' commands: description: commands sent to the device returned: always type: list sample: ["fabric forwarding anycast-gateway-mac 000B.000B.000B"] ''' import re from ansible.module_utils.network.nxos.nxos import get_config, load_config from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.common.config import CustomNetworkConfig PARAM_TO_COMMAND_KEYMAP = { 'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac', } def get_existing(module, args): existing = {} config = str(get_config(module)) for arg in args: command = PARAM_TO_COMMAND_KEYMAP[arg] has_command = re.findall(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M) value = '' if has_command: value = has_command[0] existing[arg] = value return existing def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if value: new_dict[new_key] = value return new_dict def get_commands(module, existing, proposed, candidate): commands = list() proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) for key, proposed in proposed_commands.items(): existing_value = existing_commands.get(key) if proposed == 'default' and existing_value: commands.append('no {0} {1}'.format(key, existing_value)) elif 'anycast-gateway-mac' in key and proposed != 'default': proposed = normalize_mac(proposed, module) existing_value = normalize_mac(existing_value, module) if proposed != existing_value: command = '{0} {1}'.format(key, proposed) commands.append(command) if commands: candidate.add(commands, parents=[]) def normalize_mac(proposed_mac, module): if proposed_mac is None: return '' try: if '-' in proposed_mac: splitted_mac = proposed_mac.split('-') if len(splitted_mac) != 6: raise 
ValueError for octect in splitted_mac: if len(octect) != 2: raise ValueError elif '.' in proposed_mac: splitted_mac = [] splitted_dot_mac = proposed_mac.split('.') if len(splitted_dot_mac) != 3: raise ValueError for octect in splitted_dot_mac: if len(octect) > 4: raise ValueError else: octect_len = len(octect) padding = 4 - octect_len splitted_mac.append(octect.zfill(padding + 1)) elif ':' in proposed_mac: splitted_mac = proposed_mac.split(':') if len(splitted_mac) != 6: raise ValueError for octect in splitted_mac: if len(octect) != 2: raise ValueError else: raise ValueError except ValueError: module.fail_json(msg='Invalid MAC address format', proposed_mac=proposed_mac) joined_mac = ''.join(splitted_mac) mac = [joined_mac[i:i + 4] for i in range(0, len(joined_mac), 4)] return '.'.join(mac).upper() def main(): argument_spec = dict( anycast_gateway_mac=dict(required=True, type='str'), ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) result = {'changed': False, 'commands': [], 'warnings': warnings} args = PARAM_TO_COMMAND_KEYMAP.keys() existing = get_existing(module, args) proposed = dict((k, v) for k, v in module.params.items() if v is not None and k in args) candidate = CustomNetworkConfig(indent=3) get_commands(module, existing, proposed, candidate) if candidate: candidate = candidate.items_text() result['commands'] = candidate if not module.check_mode: load_config(module, candidate) result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
awalls-cx18/gnuradio
gr-blocks/python/blocks/qa_vector_sink_source.py
6
5178
#!/usr/bin/env python
#
# Copyright 2008,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest, blocks
import pmt
import math


def make_tag(key, value, offset, srcid=None):
    """Build a gr.tag_t from plain Python values.

    key and value are converted to PMTs; srcid is attached only when
    given, otherwise the tag keeps its default source id.
    """
    tag = gr.tag_t()
    tag.key = pmt.string_to_symbol(key)
    tag.value = pmt.to_pmt(value)
    tag.offset = offset
    if srcid is not None:
        tag.srcid = pmt.to_pmt(srcid)
    return tag

def compare_tags(a, b):
    """Return True when two stream tags agree on offset, key, value and srcid."""
    return a.offset == b.offset and pmt.equal(a.key, b.key) and \
        pmt.equal(a.value, b.value) and pmt.equal(a.srcid, b.srcid)

class test_vector_sink_source(gr_unittest.TestCase):
    """QA for blocks.vector_source_f / blocks.vector_sink_f round trips."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        """Sink receives exactly the data the source was given."""
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)

        src = blocks.vector_source_f(src_data)
        dst = blocks.vector_sink_f()

        self.tb.connect(src, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_002(self):
        """Round trip with vector (multi-item) I/O signatures."""
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)
        src = blocks.vector_source_f(src_data, False, 2)
        dst = blocks.vector_sink_f(2)
        self.tb.connect(src, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_003(self):
        """Source creation fails when the data length does not divide by vlen."""
        # 16 items cannot be split into vectors of length 3.
        # (Was: also computed an unused `expected_result` - removed.)
        src_data = [float(x) for x in range(16)]
        self.assertRaises(RuntimeError,
                          lambda: blocks.vector_source_f(src_data, False, 3))

    def test_004(self):
        """Tags attached to the source come out of the sink unchanged."""
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)
        src_tags = tuple([make_tag('key', 'val', 0, 'src')])
        expected_tags = src_tags[:]
        src = blocks.vector_source_f(src_data, repeat=False, tags=src_tags)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, dst)
        self.tb.run()
        result_data = dst.data()
        result_tags = dst.tags()
        self.assertEqual(expected_result, result_data)
        self.assertEqual(len(result_tags), 1)
        self.assertTrue(compare_tags(expected_tags[0], result_tags[0]))

    def test_005(self):
        """With repeat=True the data and its tags are re-emitted each pass."""
        length = 16
        src_data = [float(x) for x in range(length)]
        expected_result = tuple(src_data + src_data)
        src_tags = tuple([make_tag('key', 'val', 0, 'src')])
        # The second pass restarts the tag offsets at `length`.
        expected_tags = tuple([make_tag('key', 'val', 0, 'src'),
                               make_tag('key', 'val', length, 'src')])
        src = blocks.vector_source_f(src_data, repeat=True, tags=src_tags)
        head = blocks.head(gr.sizeof_float, 2*length)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, head, dst)
        self.tb.run()
        result_data = dst.data()
        result_tags = dst.tags()
        self.assertEqual(expected_result, result_data)
        self.assertEqual(len(result_tags), 2)
        self.assertTrue(compare_tags(expected_tags[0], result_tags[0]))
        self.assertTrue(compare_tags(expected_tags[1], result_tags[1]))

    def test_006(self):
        """set_data replaces whatever the source was constructed with."""
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)
        src = blocks.vector_source_f((3, 1, 4))
        dst = blocks.vector_sink_f()
        src.set_data(src_data)
        self.tb.connect(src, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_007(self):
        """set_repeat(False) stops an initially repeating source."""
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)
        src = blocks.vector_source_f(src_data, True)
        dst = blocks.vector_sink_f()
        src.set_repeat(False)
        self.tb.connect(src, dst)
        # will timeout if set_repeat does not work
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

if __name__ == '__main__':
    gr_unittest.run(test_vector_sink_source, "test_vector_sink_source.xml")
gpl-3.0
kmonsoor/python-for-android
python3-alpha/python3-src/Lib/json/decoder.py
46
13087
"""Implementation of JSONDecoder
"""
import binascii
import re
import sys
import struct

from json import scanner
try:
    from _json import scanstring as c_scanstring
except ImportError:
    c_scanstring = None

__all__ = ['JSONDecoder']

FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL

def _floatconstants():
    """Build (NaN, +Infinity, -Infinity) from their IEEE-754 byte patterns."""
    _BYTES = binascii.unhexlify(b'7FF80000000000007FF0000000000000')
    # The constant above is big-endian; swap each 8-byte double on
    # little-endian machines so struct can unpack in native order.
    if sys.byteorder != 'big':
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf

NaN, PosInf, NegInf = _floatconstants()


def linecol(doc, pos):
    """Return the 1-based (lineno, colno) of absolute index *pos* in *doc*.

    *doc* may be str or bytes; the column counts from the preceding
    newline (and is 0-based only on the first line), matching the
    historical json error-message format.
    """
    if isinstance(doc, bytes):
        newline = b'\n'
    else:
        newline = '\n'
    lineno = doc.count(newline, 0, pos) + 1
    if lineno == 1:
        colno = pos
    else:
        colno = pos - doc.rindex(newline, 0, pos)
    return lineno, colno


def errmsg(msg, doc, pos, end=None):
    """Format a decoding error message with line/column context.

    When *end* is given the message describes the span pos..end.
    """
    # Note that this function is called from _json, so its signature
    # must remain stable.
    lineno, colno = linecol(doc, pos)
    if end is None:
        fmt = '{0}: line {1} column {2} (char {3})'
        return fmt.format(msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
    return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)


# Literal names accepted outside the strict JSON spec.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of plain characters, then '"', '\' or a control character.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character escapes; \uXXXX is handled separately in py_scanstring.
BACKSLASH = {
    '"': '"', '\\': '\\', '/': '/',
    'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
}

def py_scanstring(s, end, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character {0!r} at".format(terminator)
                raise ValueError(errmsg(msg, s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: {0!r}".format(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = chr(uni)
            end = next_end
        _append(char)
    return ''.join(chunks), end

# Use speedup if available
scanstring = c_scanstring or py_scanstring

WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'


def JSONObject(s_and_end, strict, scan_once, object_hook, object_pairs_hook,
               memo=None, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object starting just after its '{'.

    s_and_end is (document, index-after-open-brace).  Returns the decoded
    object (possibly transformed by object_hook / object_pairs_hook) and
    the index after the closing '}'.
    """
    s, end = s_and_end
    pairs = []
    pairs_append = pairs.append
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                # Bug fix: must consume the closing '}' (was `end`, which
                # left the brace unread so callers saw "Extra data" when
                # decoding '{}' with an object_pairs_hook set).
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, strict)
        # memoising duplicate keys saves memory on documents that repeat them
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))
        end += 1

        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs_append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))

        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end

def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array starting just after its '['.

    Returns (list, index-after-closing-bracket).
    """
    s, end = s_and_end
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return values, end


class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | str               |
    +---------------+-------------------+
    | number (int)  | int               |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.

    """

    def __init__(self, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``object_pairs_hook``, if specified will be called with the result of
        every JSON object decoded with an ordered list of pairs.  The return
        value of ``object_pairs_hook`` will be used instead of the ``dict``.
        This feature can be used to implement custom decoders that rely on the
        order that the key and value pairs are decoded (for example,
        collections.OrderedDict will remember the order of insertion). If
        ``object_hook`` is also defined, the ``object_pairs_hook`` takes
        priority.

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded. By default this is equivalent to
        float(num_str). This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded. By default this is equivalent to
        int(num_str). This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.

        If ``strict`` is false (true is the default), then control
        characters will be allowed inside strings.  Control characters in
        this context are those with character codes in the 0-31 range,
        including ``'\\t'`` (tab), ``'\\n'``, ``'\\r'`` and ``'\\0'``.

        """
        self.object_hook = object_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.object_pairs_hook = object_pairs_hook
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.memo = {}
        # Must come last: the scanner captures the attributes set above.
        self.scan_once = scanner.make_scanner(self)


    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` instance
        containing a JSON document).

        Raises ValueError if the document has trailing non-whitespace data.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` beginning with
        a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
apache-2.0
l0b0/cds-invenio-vengmark
modules/websubmit/lib/websubmit_engine.py
3
75493
## This file is part of CDS Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN. ## ## CDS Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """WebSubmit: the mechanism for the submission of new records into CDS Invenio via a Web interface. """ __revision__ = "$Id$" ## import interesting modules: import string import os import sys import time import types import re import pprint from urllib import quote_plus from cgi import escape from invenio.config import \ CFG_BINDIR, \ CFG_SITE_LANG, \ CFG_SITE_NAME, \ CFG_SITE_URL, \ CFG_PYLIBDIR, \ CFG_WEBSUBMIT_STORAGEDIR, \ CFG_VERSION from invenio.dbquery import run_sql, Error from invenio.access_control_engine import acc_authorize_action from invenio.access_control_admin import acc_is_role from invenio.webpage import page, create_error_box from invenio.webuser import getUid, get_email, collect_user_info from invenio.websubmit_config import * from invenio.messages import gettext_set_language, wash_language from invenio.errorlib import register_exception from invenio.websubmitadmin_engine import string_is_alphanumeric_including_underscore from invenio.websubmit_dblayer import \ get_storage_directory_of_action, \ get_longname_of_doctype, \ get_longname_of_action, \ get_num_pages_of_submission, \ get_parameter_value_for_doctype, \ submission_exists_in_log, \ 
log_new_pending_submission, \ log_new_completed_submission, \ update_submission_modified_date_in_log, \ update_submission_reference_in_log, \ update_submission_reference_and_status_in_log, \ get_form_fields_on_submission_page, \ get_element_description, \ get_element_check_description, \ get_form_fields_not_on_submission_page, \ function_step_is_last, \ get_collection_children_of_submission_collection, \ get_submission_collection_name, \ get_doctype_children_of_submission_collection, \ get_categories_of_doctype, \ get_doctype_details, \ get_actions_on_submission_page_for_doctype, \ get_action_details, \ get_parameters_of_function, \ get_details_of_submission, \ get_functions_for_submission_step, \ get_submissions_at_level_X_with_score_above_N, \ submission_is_finished import invenio.template websubmit_templates = invenio.template.load('websubmit') def interface(req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG, doctype="", act="", startPg=1, access="", mainmenu="", fromdir="", nextPg="", nbPg="", curpage=1): """This function is called after a user has visited a document type's "homepage" and selected the type of "action" to perform. Having clicked an action-button (e.g. "Submit a New Record"), this function will be called . It performs the task of initialising a new submission session (retrieving information about the submission, creating a working submission-directory, etc), and "drawing" a submission page containing the WebSubmit form that the user uses to input the metadata to be submitted. When a user moves between pages in the submission interface, this function is recalled so that it can save the metadata entered into the previous page by the user, and draw the current submission-page. Note: During a submission, for each page refresh, this function will be called while the variable "step" (a form variable, seen by websubmit_webinterface, which calls this function) is 0 (ZERO). 
In other words, this function handles the FRONT-END phase of a submission, BEFORE the WebSubmit functions are called. @param req: (apache request object) *** NOTE: Added into this object, is a variable called "form" (req.form). This is added into the object in the index function of websubmit_webinterface. It contains a "mod_python.util.FieldStorage" instance, that contains the form-fields found on the previous submission page. @param c: (string), defaulted to CFG_SITE_NAME. The name of the CDS Invenio installation. @param ln: (string), defaulted to CFG_SITE_LANG. The language in which to display the pages. @param doctype: (string) - the doctype ID of the doctype for which the submission is being made. @param act: (string) - The ID of the action being performed (e.g. submission of bibliographic information; modification of bibliographic information, etc). @param startPg: (integer) - Starting page for the submission? Defaults to 1. @param indir: (string) - the directory used to store all submissions of the given "type" of this submission. For example, if the submission is of the type "modify bibliographic information", this variable would contain "modify". @param access: (string) - the "access" number for the submission (e.g. 1174062451_7010). This number is also used as the name for the current working submission directory. @param mainmenu: (string) - contains the URL (minus the CDS Invenio home stem) for the submission's home-page. (E.g. If this submission is "PICT", the "mainmenu" file would contain "/submit?doctype=PICT". @param fromdir: (integer) @param nextPg: (string) @param nbPg: (string) @param curpage: (integer) - the current submission page number. Defaults to 1. 
""" ln = wash_language(ln) # load the right message language _ = gettext_set_language(ln) sys.stdout = req # get user ID: user_info = collect_user_info(req) uid = user_info['uid'] uid_email = user_info['email'] # variable initialisation t = "" field = [] fieldhtml = [] level = [] fullDesc = [] text = [] check = [] select = [] radio = [] upload = [] txt = [] noPage = [] # Preliminary tasks # check that the user is logged in if not uid_email or uid_email == "guest": return warningMsg(websubmit_templates.tmpl_warning_message( ln = ln, msg = _("Sorry, you must log in to perform this action.") ), req, ln) # warningMsg("""<center><font color="red"></font></center>""",req, ln) # check we have minimum fields if not doctype or not act or not access: ## We don't have all the necessary information to go ahead ## with this submission: return warningMsg(_("Not enough information to go ahead with the submission."), req, c, ln) try: assert(not access or re.match('\d+_\d+', access)) except AssertionError: register_exception(req=req, prefix='doctype="%s", access="%s"' % (doctype, access)) return warningMsg(_("Invalid parameters"), req, c, ln) if doctype and act: ## Let's clean the input details = get_details_of_submission(doctype, act) if not details: return warningMsg(_("Invalid doctype and act parameters"), req, c, ln) doctype = details[0] act = details[1] ## Before continuing to display the submission form interface, ## verify that this submission has not already been completed: if submission_is_finished(doctype, act, access, uid_email): ## This submission has already been completed. ## This situation can arise when, having completed a submission, ## the user uses the browser's back-button to go back to the form ## stage of the submission and then tries to submit once more. ## This is unsafe and should not be allowed. Instead of re-displaying ## the submission forms, display an error message to the user: wrnmsg = """<b>This submission has been completed. 
Please go to the""" \ """ <a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s">""" \ """main menu</a> to start a new submission.</b>""" \ % { 'doctype' : quote_plus(doctype), 'ln' : ln } return warningMsg(wrnmsg, req) ## retrieve the action and doctype data: ## Concatenate action ID and doctype ID to make the submission ID: subname = "%s%s" % (act, doctype) ## Get the submission storage directory from the DB: submission_dir = get_storage_directory_of_action(act) if submission_dir: indir = submission_dir else: ## Unable to determine the submission-directory: return warningMsg(_("Unable to find the submission directory for the action: %s") % escape(str(act)), req, c, ln) ## get the document type's long-name: doctype_lname = get_longname_of_doctype(doctype) if doctype_lname is not None: ## Got the doctype long-name: replace spaces with HTML chars: docname = doctype_lname.replace(" ", "&nbsp;") else: ## Unknown document type: return warningMsg(_("Unknown document type"), req, c, ln) ## get the action's long-name: actname = get_longname_of_action(act) if actname is None: ## Unknown action: return warningMsg(_("Unknown action"), req, c, ln) ## Get the number of pages for this submission: num_submission_pages = get_num_pages_of_submission(subname) if num_submission_pages is not None: nbpages = num_submission_pages else: ## Unable to determine the number of pages for this submission: return warningMsg(_("Unable to determine the number of submission pages."), req, c, ln) ## If unknown, get the current page of submission: if startPg != "" and curpage in ("", 0): curpage = startPg ## retrieve the name of the file in which the reference of ## the submitted document will be stored rn_filename = get_parameter_value_for_doctype(doctype, "edsrn") if rn_filename is not None: edsrn = rn_filename else: ## Unknown value for edsrn - set it to an empty string: edsrn = "" ## This defines the path to the directory containing the action data curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, 
indir, doctype, access) try: assert(curdir == os.path.abspath(curdir)) except AssertionError: register_exception(req=req, prefix='indir="%s", doctype="%s", access="%s"' % (indir, doctype, access)) return warningMsg(_("Invalid parameters"), req, c, ln) ## if this submission comes from another one (fromdir is then set) ## We retrieve the previous submission directory and put it in the proper one if fromdir != "": olddir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, fromdir, doctype, access) try: assert(olddir == os.path.abspath(olddir)) except AssertionError: register_exception(req=req, prefix='fromdir="%s", doctype="%s", access="%s"' % (fromdir, doctype, access)) return warningMsg(_("Invalid parameters"), req, c, ln) if os.path.exists(olddir): os.rename(olddir, curdir) ## If the submission directory still does not exist, we create it if not os.path.exists(curdir): try: os.makedirs(curdir) except Exception, e: register_exception(req=req, alert_admin=True) return warningMsg(_("Unable to create a directory for this submission. The administrator has been alerted."), req, c, ln) # retrieve the original main menu url and save it in the "mainmenu" file if mainmenu != "": fp = open(os.path.join(curdir, "mainmenu"), "w") fp.write(mainmenu) fp.close() # and if the file containing the URL to the main menu exists # we retrieve it and store it in the $mainmenu variable if os.path.exists(os.path.join(curdir, "mainmenu")): fp = open(os.path.join(curdir, "mainmenu"), "r"); mainmenu = fp.read() fp.close() else: mainmenu = "%s/submit" % (CFG_SITE_URL,) # various authentication related tasks... if uid_email != "guest" and uid_email != "": #First save the username (email address) in the SuE file. 
This way bibconvert will be able to use it if needed fp = open(os.path.join(curdir, "SuE"), "w") fp.write(uid_email) fp.close() if os.path.exists(os.path.join(curdir, "combo%s" % doctype)): fp = open(os.path.join(curdir, "combo%s" % doctype), "r"); categ = fp.read() fp.close() else: categ = req.form.get('combo%s' % doctype, '*') # is user authorized to perform this action? (auth_code, auth_message) = acc_authorize_action(req, "submit", verbose=0, doctype=doctype, act=act, categ=categ) if acc_is_role("submit", doctype=doctype, act=act) and auth_code != 0: return warningMsg("""<center><font color="red">%s</font></center>""" % auth_message, req) ## update the "journal of submission": ## Does the submission already exist in the log? submission_exists = \ submission_exists_in_log(doctype, act, access, uid_email) if submission_exists == 1: ## update the modification-date of this submission in the log: update_submission_modified_date_in_log(doctype, act, access, uid_email) else: ## Submission doesn't exist in log - create it: log_new_pending_submission(doctype, act, access, uid_email) ## Let's write in curdir file under curdir the curdir value ## in case e.g. it is needed in FFT. 
fp = open(os.path.join(curdir, "curdir"), "w") fp.write(curdir) fp.close() # Save the form fields entered in the previous submission page # If the form was sent with the GET method form = dict(req.form) value = "" # we parse all the form variables for key, formfields in form.items(): filename = key.replace("[]", "") file_to_open = os.path.join(curdir, filename) try: assert(file_to_open == os.path.abspath(file_to_open)) except AssertionError: register_exception(req=req, prefix='curdir="%s", filename="%s"' % (curdir, filename)) return warningMsg(_("Invalid parameters"), req, c, ln) # Do not write reserved filenames to disk if filename in CFG_RESERVED_SUBMISSION_FILENAMES: # Unless there is really an element with that name in the # interface, which means that admin authorized it if not filename in [submission_field[3] for submission_field in get_form_fields_on_submission_page(subname, curpage)]: # Still this will filter out reserved field names that # might have been called by functions such as # Create_Modify_Interface function in MBI step, or # dynamic fields in response elements, but that is # unlikely to be a problem. 
continue # Skip variables containing characters that are not allowed in # WebSubmit elements if not string_is_alphanumeric_including_underscore(filename): continue # the field is an array if isinstance(formfields, types.ListType): fp = open(file_to_open, "w") for formfield in formfields: #stripslashes(value) value = specialchars(formfield) fp.write(value+"\n") fp.close() # the field is a normal string elif isinstance(formfields, types.StringTypes) and formfields != "": value = formfields fp = open(file_to_open, "w") fp.write(specialchars(value)) fp.close() # the field is a file elif hasattr(formfields,"filename") and formfields.filename: dir_to_open = os.path.join(curdir, 'files', key) try: assert(dir_to_open == os.path.abspath(dir_to_open)) assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR)) except AssertionError: register_exception(req=req, prefix='curdir="%s", key="%s"' % (curdir, key)) return warningMsg(_("Invalid parameters"), req, c, ln) if not os.path.exists(dir_to_open): try: os.makedirs(dir_to_open) except: register_exception(req=req, alert_admin=True) return warningMsg(_("Cannot create submission directory. The administrator has been alerted."), req, c, ln) filename = formfields.filename ## Before saving the file to disc, wash the filename (in particular ## washing away UNIX and Windows (e.g. 
DFS) paths): filename = os.path.basename(filename.split('\\')[-1]) filename = filename.strip() if filename != "": # This may be dangerous if the file size is bigger than the available memory fp = open(os.path.join(dir_to_open, filename), "w") while formfields.file: fp.write(formfields.file.read(10240)) fp.close() fp = open(os.path.join(curdir, "lastuploadedfile"), "w") fp.write(filename) fp.close() fp = open(file_to_open, "w") fp.write(filename) fp.close() else: return warningMsg(_("No file uploaded?"), req, c, ln) ## if the found field is the reference of the document, ## save this value in the "journal of submissions": if uid_email != "" and uid_email != "guest": if key == edsrn: update_submission_reference_in_log(doctype, access, uid_email, value) ## create the interface: subname = "%s%s" % (act, doctype) ## Get all of the form fields that appear on this page, ordered by fieldnum: form_fields = get_form_fields_on_submission_page(subname, curpage) full_fields = [] values = [] the_globals = { 'doctype' : doctype, 'action' : action, 'access' : access, 'ln' : ln, 'curdir' : curdir, 'uid' : uid, 'uid_email' : uid_email, 'form' : form, 'act' : act, 'req' : req, 'user_info' : user_info, 'InvenioWebSubmitFunctionError' : InvenioWebSubmitFunctionError, '__websubmit_in_jail__' : True, '__builtins__' : globals()['__builtins__'] } for field_instance in form_fields: full_field = {} ## Retrieve the field's description: element_descr = get_element_description(field_instance[3]) try: assert(element_descr is not None) except AssertionError: msg = _("Unknown form field found on submission page.") register_exception(req=req, alert_admin=True, prefix=msg) ## The form field doesn't seem to exist - return with error message: return warningMsg(_("Unknown form field found on submission page."), req, c, ln) if element_descr[8] is None: val = "" else: val = element_descr[8] ## we also retrieve and add the javascript code of the checking function, if needed ## Set it to empty string to 
begin with: full_field['javascript'] = '' if field_instance[7] != '': check_descr = get_element_check_description(field_instance[7]) if check_descr is not None: ## Retrieved the check description: full_field['javascript'] = check_descr full_field['type'] = element_descr[3] full_field['name'] = field_instance[3] full_field['rows'] = element_descr[5] full_field['cols'] = element_descr[6] full_field['val'] = val full_field['size'] = element_descr[4] full_field['maxlength'] = element_descr[7] full_field['htmlcode'] = element_descr[9] full_field['typename'] = field_instance[1] ## TODO: Investigate this, Not used? ## It also seems to refer to pagenum. # The 'R' fields must be executed in the engine's environment, # as the runtime functions access some global and local # variables. if full_field ['type'] == 'R': try: co = compile (full_field ['htmlcode'].replace("\r\n","\n"), "<string>", "exec") the_globals['text'] = '' exec co in the_globals text = the_globals['text'] except: register_exception(req=req, alert_admin=True, prefix="Error in evaluating response element %s with globals %s" % (pprint.pformat(full_field), pprint.pformat(the_globals))) raise else: text = websubmit_templates.tmpl_submit_field (ln = ln, field = full_field) # we now determine the exact type of the created field if full_field['type'] not in [ 'D','R']: field.append(full_field['name']) level.append(field_instance[5]) fullDesc.append(field_instance[4]) txt.append(field_instance[6]) check.append(field_instance[7]) # If the field is not user-defined, we try to determine its type # (select, radio, file upload...) 
# check whether it is a select field or not if re.search("SELECT", text, re.IGNORECASE) is not None: select.append(1) else: select.append(0) # checks whether it is a radio field or not if re.search(r"TYPE=[\"']?radio", text, re.IGNORECASE) is not None: radio.append(1) else: radio.append(0) # checks whether it is a file upload or not if re.search(r"TYPE=[\"']?file", text, re.IGNORECASE) is not None: upload.append(1) else: upload.append(0) # if the field description contains the "<COMBO>" string, replace # it by the category selected on the document page submission page combofile = "combo%s" % doctype if os.path.exists("%s/%s" % (curdir, combofile)): f = open("%s/%s" % (curdir, combofile), "r") combo = f.read() f.close() else: combo="" text = text.replace("<COMBO>", combo) # if there is a <YYYY> tag in it, replace it by the current year year = time.strftime("%Y"); text = text.replace("<YYYY>", year) # if there is a <TODAY> tag in it, replace it by the current year today = time.strftime("%d/%m/%Y"); text = text.replace("<TODAY>", today) fieldhtml.append(text) else: select.append(0) radio.append(0) upload.append(0) # field.append(value) - initial version, not working with JS, taking a submitted value field.append(field_instance[3]) level.append(field_instance[5]) txt.append(field_instance[6]) fullDesc.append(field_instance[4]) check.append(field_instance[7]) fieldhtml.append(text) full_field['fullDesc'] = field_instance[4] full_field['text'] = text # If a file exists with the name of the field we extract the saved value text = '' if os.path.exists(os.path.join(curdir, full_field['name'])): file = open(os.path.join(curdir, full_field['name']), "r"); text = file.read() text = re.compile("[\n\r]*$").sub("", text) text = re.compile("\n").sub("\\n", text) text = re.compile("\r").sub("", text) file.close() values.append(text) full_fields.append(full_field) returnto = {} if int(curpage) == int(nbpages): subname = "%s%s" % (act, doctype) other_form_fields = \ 
get_form_fields_not_on_submission_page(subname, curpage) nbFields = 0 message = "" fullcheck_select = [] fullcheck_radio = [] fullcheck_upload = [] fullcheck_field = [] fullcheck_level = [] fullcheck_txt = [] fullcheck_noPage = [] fullcheck_check = [] for field_instance in other_form_fields: if field_instance[5] == "M": ## If this field is mandatory, get its description: element_descr = get_element_description(field_instance[3]) try: assert(element_descr is not None) except AssertionError: msg = _("Unknown form field found on submission page.") register_exception(req=req, alert_admin=True, prefix=msg) ## The form field doesn't seem to exist - return with error message: return warningMsg(_("Unknown form field found on submission page."), req, c, ln) if element_descr[3] in ['D', 'R']: if element_descr[3] == "D": text = element_descr[9] else: text = eval(element_descr[9]) formfields = text.split(">") for formfield in formfields: match = re.match("name=([^ <>]+)", formfield, re.IGNORECASE) if match is not None: names = match.groups for value in names: if value != "": value = re.compile("[\"']+").sub("", value) fullcheck_field.append(value) fullcheck_level.append(field_instance[5]) fullcheck_txt.append(field_instance[6]) fullcheck_noPage.append(field_instance[1]) fullcheck_check.append(field_instance[7]) nbFields = nbFields + 1 else: fullcheck_noPage.append(field_instance[1]) fullcheck_field.append(field_instance[3]) fullcheck_level.append(field_instance[5]) fullcheck_txt.append(field_instance[6]) fullcheck_check.append(field_instance[7]) nbFields = nbFields+1 # tests each mandatory field fld = 0 res = 1 for i in xrange(nbFields): res = 1 if not os.path.exists(os.path.join(curdir, fullcheck_field[i])): res=0 else: file = open(os.path.join(curdir, fullcheck_field[i]), "r") text = file.read() if text == '': res=0 else: if text == "Select:": res=0 if res == 0: fld = i break if not res: returnto = { 'field' : fullcheck_txt[fld], 'page' : fullcheck_noPage[fld], } t += 
websubmit_templates.tmpl_page_interface( ln = ln, docname = docname, actname = actname, curpage = curpage, nbpages = nbpages, nextPg = nextPg, access = access, nbPg = nbPg, doctype = doctype, act = act, fields = full_fields, javascript = websubmit_templates.tmpl_page_interface_js( ln = ln, upload = upload, field = field, fieldhtml = fieldhtml, txt = txt, check = check, level = level, curdir = curdir, values = values, select = select, radio = radio, curpage = curpage, nbpages = nbpages, returnto = returnto, ), mainmenu = mainmenu, ) t += websubmit_templates.tmpl_page_do_not_leave_submission_js(ln) # start display: req.content_type = "text/html" req.send_http_header() p_navtrail = """<a href="/submit?ln=%(ln)s" class="navtrail">%(submit)s</a>&nbsp;>&nbsp;<a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s" class="navtrail">%(docname)s</a>&nbsp;""" % { 'submit' : _("Submit"), 'doctype' : quote_plus(doctype), 'docname' : docname, 'ln' : ln } return page(title= actname, body = t, navtrail = p_navtrail, description = "submit documents", keywords = "submit", uid = uid, language = ln, req = req, navmenuid='submit') def endaction(req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG, doctype="", act="", startPg=1, access="", mainmenu="", fromdir="", nextPg="", nbPg="", curpage=1, step=1, mode="U"): """Having filled-in the WebSubmit form created for metadata by the interface function, the user clicks a button to either "finish the submission" or to "proceed" to the next stage of the submission. At this point, a variable called "step" will be given a value of 1 or above, which means that this function is called by websubmit_webinterface. So, during all non-zero steps of the submission, this function is called. In other words, this function is called during the BACK-END phase of a submission, in which WebSubmit *functions* are being called. 
The function first ensures that all of the WebSubmit form field values have been saved in the current working submission directory, in text- files with the same name as the field elements have. It then determines the functions to be called for the given step of the submission, and executes them. Following this, if this is the last step of the submission, it logs the submission as "finished" in the journal of submissions. @param req: (apache request object) *** NOTE: Added into this object, is a variable called "form" (req.form). This is added into the object in the index function of websubmit_webinterface. It contains a "mod_python.util.FieldStorage" instance, that contains the form-fields found on the previous submission page. @param c: (string), defaulted to CFG_SITE_NAME. The name of the CDS Invenio installation. @param ln: (string), defaulted to CFG_SITE_LANG. The language in which to display the pages. @param doctype: (string) - the doctype ID of the doctype for which the submission is being made. @param act: (string) - The ID of the action being performed (e.g. submission of bibliographic information; modification of bibliographic information, etc). @param startPg: (integer) - Starting page for the submission? Defaults to 1. @param indir: (string) - the directory used to store all submissions of the given "type" of this submission. For example, if the submission is of the type "modify bibliographic information", this variable would contain "modify". @param access: (string) - the "access" number for the submission (e.g. 1174062451_7010). This number is also used as the name for the current working submission directory. @param mainmenu: (string) - contains the URL (minus the CDS Invenio home stem) for the submission's home-page. (E.g. If this submission is "PICT", the "mainmenu" file would contain "/submit?doctype=PICT". @param fromdir: @param nextPg: @param nbPg: @param curpage: (integer) - the current submission page number. Defaults to 1. 
@param step: (integer) - the current step of the submission. Defaults to 1. @param mode: """ # load the right message language _ = gettext_set_language(ln) try: rn except NameError: rn = "" dismode = mode ln = wash_language(ln) sys.stdout = req t = "" # get user ID: uid = getUid(req) uid_email = get_email(uid) # Preliminary tasks # check that the user is logged in if uid_email == "" or uid_email == "guest": return warningMsg(websubmit_templates.tmpl_warning_message( ln = ln, msg = _("Sorry, you must log in to perform this action.") ), req, ln) ## check we have minimum fields if not doctype or not act or not access: ## We don't have all the necessary information to go ahead ## with this submission: return warningMsg(_("Not enough information to go ahead with the submission."), req, c, ln) if doctype and act: ## Let's clean the input details = get_details_of_submission(doctype, act) if not details: return warningMsg(_("Invalid doctype and act parameters"), req, c, ln) doctype = details[0] act = details[1] try: assert(not access or re.match('\d+_\d+', access)) except AssertionError: register_exception(req=req, prefix='doctype="%s", access="%s"' % (doctype, access)) return warningMsg(_("Invalid parameters"), req, c, ln) ## Before continuing to process the submitted data, verify that ## this submission has not already been completed: if submission_is_finished(doctype, act, access, uid_email): ## This submission has already been completed. ## This situation can arise when, having completed a submission, ## the user uses the browser's back-button to go back to the form ## stage of the submission and then tries to submit once more. ## This is unsafe and should not be allowed. Instead of re-processing ## the submitted data, display an error message to the user: wrnmsg = """<b>This submission has been completed. 
Please go to the""" \ """ <a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s">""" \ """main menu</a> to start a new submission.</b>""" \ % { 'doctype' : quote_plus(doctype), 'ln' : ln } return warningMsg(wrnmsg, req) ## Get the number of pages for this submission: subname = "%s%s" % (act, doctype) ## retrieve the action and doctype data ## Get the submission storage directory from the DB: submission_dir = get_storage_directory_of_action(act) if submission_dir: indir = submission_dir else: ## Unable to determine the submission-directory: return warningMsg(_("Unable to find the submission directory for the action: %s") % escape(str(act)), req, c, ln) # The following words are reserved and should not be used as field names reserved_words = ["stop", "file", "nextPg", "startPg", "access", "curpage", "nbPg", "act", \ "indir", "doctype", "mode", "step", "deleted", "file_path", "userfile_name"] # This defines the path to the directory containing the action data curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, indir, doctype, access) try: assert(curdir == os.path.abspath(curdir)) except AssertionError: register_exception(req=req, prefix='indir="%s", doctype=%s, access=%s' % (indir, doctype, access)) return warningMsg(_("Invalid parameters"), req, c, ln) ## If the submission directory still does not exist, we create it if not os.path.exists(curdir): try: os.makedirs(curdir) except Exception, e: register_exception(req=req, alert_admin=True) return warningMsg(_("Unable to create a directory for this submission. 
The administrator has been alerted."), req, c, ln) # retrieve the original main menu url ans save it in the "mainmenu" file if mainmenu != "": fp = open(os.path.join(curdir, "mainmenu"), "w") fp.write(mainmenu) fp.close() # and if the file containing the URL to the main menu exists # we retrieve it and store it in the $mainmenu variable if os.path.exists(os.path.join(curdir, "mainmenu")): fp = open(os.path.join(curdir, "mainmenu"), "r"); mainmenu = fp.read() fp.close() else: mainmenu = "%s/submit" % (CFG_SITE_URL,) ## retrieve the name of the file in which the reference of ## the submitted document will be stored rn_filename = get_parameter_value_for_doctype(doctype, "edsrn") if rn_filename is not None: edsrn = rn_filename else: ## Unknown value for edsrn - set it to an empty string: edsrn = "" ## Determine whether the action is finished ## (ie there are no other steps after the current one): finished = function_step_is_last(doctype, act, step) ## Let's write in curdir file under curdir the curdir value ## in case e.g. it is needed in FFT. 
fp = open(os.path.join(curdir, "curdir"), "w") fp.write(curdir) fp.close() # Save the form fields entered in the previous submission page # If the form was sent with the GET method form = req.form value = "" # we parse all the form variables for key in form.keys(): formfields = form[key] filename = key.replace("[]", "") file_to_open = os.path.join(curdir, filename) try: assert(file_to_open == os.path.abspath(file_to_open)) assert(file_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR)) except AssertionError: register_exception(req=req, prefix='curdir="%s", filename="%s"' % (curdir, filename)) return warningMsg(_("Invalid parameters"), req, c, ln) # Do not write reserved filenames to disk if filename in CFG_RESERVED_SUBMISSION_FILENAMES: # Unless there is really an element with that name in the # interface, which means that admin authorized it if not filename in [submission_field[3] for submission_field in get_form_fields_on_submission_page(subname, curpage)]: # Still this will filter out reserved field names that # might have been called by functions such as # Create_Modify_Interface function in MBI step, or # dynamic fields in response elements, but that is # unlikely to be a problem. 
continue # Skip variables containing characters that are not allowed in # WebSubmit elements if not string_is_alphanumeric_including_underscore(filename): continue # the field is an array if isinstance(formfields,types.ListType): fp = open(file_to_open, "w") for formfield in formfields: #stripslashes(value) value = specialchars(formfield) fp.write(value+"\n") fp.close() # the field is a normal string elif isinstance(formfields, types.StringTypes) and formfields != "": value = formfields fp = open(file_to_open, "w") fp.write(specialchars(value)) fp.close() # the field is a file elif hasattr(formfields, "filename") and formfields.filename: dir_to_open = os.path.join(curdir, 'files', key) try: assert(dir_to_open == os.path.abspath(dir_to_open)) assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR)) except AssertionError: register_exception(req=req, prefix='curdir="%s", key="%s"' % (curdir, key)) return warningMsg(_("Invalid parameters"), req, c, ln) if not os.path.exists(dir_to_open): try: os.makedirs(dir_to_open) except: register_exception(req=req, alert_admin=True) return warningMsg(_("Cannot create submission directory. The administrator has been alerted."), req, c, ln) filename = formfields.filename ## Before saving the file to disc, wash the filename (in particular ## washing away UNIX and Windows (e.g. 
DFS) paths): filename = os.path.basename(filename.split('\\')[-1]) filename = filename.strip() if filename != "": # This may be dangerous if the file size is bigger than the available memory data = formfields.file.read() fp = open(os.path.join(dir_to_open, filename), "w") fp.write(data) fp.close() fp = open(os.path.join(curdir, "lastuploadedfile"), "w") fp.write(filename) fp.close() fp = open(file_to_open, "w") fp.write(filename) fp.close() else: return warningMsg(_("No file uploaded?"), req, c, ln) ## if the found field is the reference of the document ## we save this value in the "journal of submissions" if uid_email != "" and uid_email != "guest": if key == edsrn: update_submission_reference_in_log(doctype, access, uid_email, value) ## get the document type's long-name: doctype_lname = get_longname_of_doctype(doctype) if doctype_lname is not None: ## Got the doctype long-name: replace spaces with HTML chars: docname = doctype_lname.replace(" ", "&nbsp;") else: ## Unknown document type: return warningMsg(_("Unknown document type"), req, c, ln) ## get the action's long-name: actname = get_longname_of_action(act) if actname is None: ## Unknown action: return warningMsg(_("Unknown action"), req, c, ln) num_submission_pages = get_num_pages_of_submission(subname) if num_submission_pages is not None: nbpages = num_submission_pages else: ## Unable to determine the number of pages for this submission: return warningMsg(_("Unable to determine the number of submission pages."), \ req, CFG_SITE_NAME, ln) ## Determine whether the action is finished ## (ie there are no other steps after the current one): last_step = function_step_is_last(doctype, act, step) next_action = '' ## The next action to be proposed to the user # Prints the action details, returning the mandatory score action_score = action_details(doctype, act) current_level = get_level(doctype, act) # Calls all the function's actions function_content = '' try: ## Handle the execution of the functions for this ## 
submission/step: start_time = time.time() (function_content, last_step, action_score) = print_function_calls( req=req, doctype=doctype, action=act, step=step, form=form, start_time=start_time, access=access, curdir=curdir, dismode=mode, rn=rn, last_step=last_step, action_score=action_score, ln=ln) except InvenioWebSubmitFunctionError, e: register_exception(req=req, alert_admin=True, prefix='doctype="%s", action="%s", step="%s", form="%s", start_time="%s"' % (doctype, act, step, form, start_time)) ## There was a serious function-error. Execution ends. return warningMsg(_("A serious function-error has been encountered. Adminstrators have been alerted. <br /><em>Please not that this might be due to wrong characters inserted into the form</em> (e.g. by copy and pasting some text from a PDF file)."), req, c, ln) except InvenioWebSubmitFunctionStop, e: ## For one reason or another, one of the functions has determined that ## the data-processing phase (i.e. the functions execution) should be ## halted and the user should be returned to the form interface once ## more. (NOTE: Redirecting the user to the Web-form interface is ## currently done using JavaScript. The "InvenioWebSubmitFunctionStop" ## exception contains a "value" string, which is effectively JavaScript ## - probably an alert box and a form that is submitted). **THIS WILL ## CHANGE IN THE FUTURE WHEN JavaScript IS REMOVED!** if e.value is not None: function_content = e.value else: function_content = e else: ## No function exceptions (InvenioWebSubmitFunctionStop, ## InvenioWebSubmitFunctionError) were raised by the functions. 
Propose ## the next action (if applicable), and log the submission as finished: ## If the action was mandatory we propose the next ## mandatory action (if any) if action_score != -1 and last_step == 1: next_action = Propose_Next_Action(doctype, \ action_score, \ access, \ current_level, \ indir) ## If we are in the last step of an action, we can update ## the "journal of submissions" if last_step == 1: if uid_email != "" and uid_email != "guest" and rn != "": ## update the "journal of submission": ## Does the submission already exist in the log? submission_exists = \ submission_exists_in_log(doctype, act, access, uid_email) if submission_exists == 1: ## update the rn and status to finished for this submission ## in the log: update_submission_reference_and_status_in_log(doctype, \ act, \ access, \ uid_email, \ rn, \ "finished") else: ## Submission doesn't exist in log - create it: log_new_completed_submission(doctype, \ act, \ access, \ uid_email, \ rn) ## Having executed the functions, create the page that will be displayed ## to the user: t = websubmit_templates.tmpl_page_endaction( ln = ln, # these fields are necessary for the navigation nextPg = nextPg, startPg = startPg, access = access, curpage = curpage, nbPg = nbPg, nbpages = nbpages, doctype = doctype, act = act, docname = docname, actname = actname, mainmenu = mainmenu, finished = finished, function_content = function_content, next_action = next_action, ) if not finished: t += websubmit_templates.tmpl_page_do_not_leave_submission_js(ln) # start display: req.content_type = "text/html" req.send_http_header() p_navtrail = '<a href="/submit?ln='+ln+'" class="navtrail">' + _("Submit") +\ """</a>&nbsp;>&nbsp;<a href="/submit?doctype=%(doctype)s&amp;ln=%(ln)s" class="navtrail">%(docname)s</a>""" % { 'doctype' : quote_plus(doctype), 'docname' : docname, 'ln' : ln, } return page(title= actname, body = t, navtrail = p_navtrail, description="submit documents", keywords="submit", uid = uid, language = ln, req = req, 
                navmenuid='submit')

def home(req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG):
    """This function generates the WebSubmit "home page".
       Basically, this page contains a list of submission-collections
       in WebSubmit, and gives links to the various document-type
       submissions.
       Document-types only appear on this page when they have been
       connected to a submission-collection in WebSubmit.
       @param req: (apache request object)
       @param c: (string) - defaults to CFG_SITE_NAME
       @param ln: (string) - The CDS Invenio interface language of choice.
        Defaults to CFG_SITE_LANG (the default language of the installation).
       @return: (string) - the Web page to be displayed.
    """
    ln = wash_language(ln)
    # get user ID:
    try:
        uid = getUid(req)
    except Error, e:
        ## Could not determine the user-id - display the error page:
        return errorMsg(e, req, c, ln)
    # load the right message language
    _ = gettext_set_language(ln)
    user_info = collect_user_info(req)
    ## Build the submission-collections tree and wrap it in the
    ## "submit home" template:
    finaltext = websubmit_templates.tmpl_submit_home_page(
                  ln = ln,
                  catalogues = makeCataloguesTable(user_info, ln)
                )
    return page(title=_("Submit"),
                body=finaltext,
                navtrail=[],
                description="submit documents",
                keywords="submit",
                uid=uid,
                language=ln,
                req=req,
                navmenuid='submit'
               )

def makeCataloguesTable(user_info, ln=CFG_SITE_LANG):
    """Build the 'catalogues' (submission-collections) tree for the
       WebSubmit home-page. This tree contains the links to the various
       document types in WebSubmit.
       @param user_info: (dict) - the user information in order to decide
        whether to display a submission.
       @param ln: (string) - the language of the interface.
        (defaults to 'CFG_SITE_LANG').
       @return: (string) - the submission-collections tree.
    """
    text = ""
    catalogues = []

    ## Get the submission-collections attached at the top level
    ## of the submission-collection tree (parent ID 0 is the root):
    top_level_collctns = get_collection_children_of_submission_collection(0)
    if len(top_level_collctns) != 0:
        ## There are submission-collections attached to the top level.
        ## retrieve their details for displaying:
        for child_collctn in top_level_collctns:
            catalogues.append(getCatalogueBranch(child_collctn[0], 1, user_info))

        text = websubmit_templates.tmpl_submit_home_catalogs(
                 ln=ln,
                 catalogs=catalogues
               )
    else:
        ## No submission-collections at all - show the "no content" page:
        text = websubmit_templates.tmpl_submit_home_catalog_no_content(ln=ln)
    return text

def getCatalogueBranch(id_father, level, user_info):
    """Build up a given branch of the submission-collection tree. I.e. given
       a parent submission-collection ID, build up the tree below it. This
       tree will include doctype-children, as well as other submission-
       collections and their children.
       Finally, return the branch as a dictionary.
       @param id_father: (integer) - the ID of the submission-collection
        from which to begin building the branch.
       @param level: (integer) - the level of the current submission-
        collection branch.
       @param user_info: (dict) - the user information in order to decide
        whether to display a submission.
       @return: (dictionary) - the branch and its sub-branches.
    """
    elem = {} ## The dictionary to contain this branch of the tree.
    ## First, get the submission-collection-details:
    collctn_name = get_submission_collection_name(id_father)
    if collctn_name is not None:
        ## Got the submission-collection's name:
        elem['name'] = collctn_name
    else:
        ## The submission-collection is unknown to the DB
        ## set its name as empty:
        elem['name'] = ""
    elem['id'] = id_father
    elem['level'] = level

    ## Now get details of the doctype-children of this
    ## submission-collection:
    elem['docs'] = [] ## List to hold the doctype-children
                      ## of the submission-collection
    doctype_children = \
       get_doctype_children_of_submission_collection(id_father)
    for child_doctype in doctype_children:
        ## Only include doctypes the current user may submit:
        if acc_authorize_action(user_info, 'submit', authorized_if_no_roles=True, doctype=child_doctype[0])[0] == 0:
            elem['docs'].append(getDoctypeBranch(child_doctype[0]))

    ## Now, get the collection-children of this submission-collection:
    elem['sons'] = []
    collctn_children = \
       get_collection_children_of_submission_collection(id_father)
    for child_collctn in collctn_children:
        ## Recurse one level deeper for each child collection:
        elem['sons'].append(getCatalogueBranch(child_collctn[0], level + 1, user_info))

    ## Now return this branch of the built-up 'collection-tree':
    return elem

def getDoctypeBranch(doctype):
    """Create a document-type 'leaf-node' for the submission-collections
       tree. Basically, this leaf is a dictionary containing the name
       and ID of the document-type submission to which it links.
       @param doctype: (string) - the ID of the document type.
       @return: (dictionary) - the document-type 'leaf node'. Contains
        the following values:
          + id:   (string) - the document-type ID.
          + name: (string) - the (long) name of the document-type.
    """
    ldocname = get_longname_of_doctype(doctype)
    if ldocname is None:
        ldocname = "Unknown Document Type"
    return { 'id'   : doctype,
             'name' : ldocname,
           }

def displayCatalogueBranch(id_father, level, catalogues):
    """Recursively render one branch of the submission-collection tree as
       nested HTML <LI>/<UL> list items.
       @param id_father: (integer) - the ID of the submission-collection
        at the root of this branch.
       @param level: (integer) - the depth of this branch (1 = top level).
       @param catalogues: (list) - accumulator of visited collection IDs;
        appended to as a side-effect.
       @return: (string) - the HTML for this branch.
    """
    text = ""
    collctn_name = get_submission_collection_name(id_father)
    if collctn_name is None:
        ## If this submission-collection wasn't known in the DB,
        ## give it the name "Unknown Submission-Collection" to
        ## avoid errors:
        collctn_name = "Unknown Submission-Collection"
    ## Now, create the display for this submission-collection:
    if level == 1:
        text = "<LI><font size=\"+1\"><strong>%s</strong></font>\n" \
               % collctn_name
    else:
        ## TODO: These are the same (and the if is ugly.) Why?
        ## NOTE(review): levels 2 and >2 render identically; the two
        ## branches below could be collapsed into a single "level >= 2".
        if level == 2:
            text = "<LI>%s\n" % collctn_name
        else:
            if level > 2:
                text = "<LI>%s\n" % collctn_name
    ## Now display the children document-types that are attached
    ## to this submission-collection:
    ## First, get the children:
    doctype_children = get_doctype_children_of_submission_collection(id_father)
    collctn_children = get_collection_children_of_submission_collection(id_father)
    if len(doctype_children) > 0 or len(collctn_children) > 0:
        ## There is something to display, so open a list:
        text = text + "<UL>\n"
    ## First, add the doctype leaves of this branch:
    for child_doctype in doctype_children:
        ## Add the doctype 'leaf-node':
        text = text + displayDoctypeBranch(child_doctype[0], catalogues)
    ## Now add the submission-collection sub-branches:
    for child_collctn in collctn_children:
        catalogues.append(child_collctn[0])
        text = text + displayCatalogueBranch(child_collctn[0], level+1, catalogues)
    ## Finally, close up the list if there were nodes to display
    ## at this branch:
    if len(doctype_children) > 0 or len(collctn_children) > 0:
        text = text + "</UL>\n"
    return text

def displayDoctypeBranch(doctype, catalogues):
    """Render a single document-type 'leaf' of the catalogue tree as an
       HTML <LI> whose link submits the enclosing form with this doctype.
       @param doctype: (string) - the ID of the document type.
       @param catalogues: (list) - visited collection IDs (unused here;
        kept for symmetry with displayCatalogueBranch).
       @return: (string) - the HTML for this leaf.
    """
    text = ""
    ldocname = get_longname_of_doctype(doctype)
    if ldocname is None:
        ldocname = "Unknown Document Type"
    text = "<LI><a href=\"\" onmouseover=\"javascript:" \
"popUpTextWindow('%s',true,event);\" onmouseout" \ "=\"javascript:popUpTextWindow('%s',false,event);\" " \ "onClick=\"document.forms[0].doctype.value='%s';" \ "document.forms[0].submit();return false;\">%s</a>\n" \ % (doctype, doctype, doctype, ldocname) return text def action(req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG, doctype=""): # load the right message language _ = gettext_set_language(ln) nbCateg = 0 snameCateg = [] lnameCateg = [] actionShortDesc = [] indir = [] actionbutton = [] statustext = [] t = "" ln = wash_language(ln) # get user ID: try: uid = getUid(req) uid_email = get_email(uid) except Error, e: return errorMsg(e, req, c, ln) #parses database to get all data ## first, get the list of categories doctype_categs = get_categories_of_doctype(doctype) for doctype_categ in doctype_categs: nbCateg = nbCateg+1 snameCateg.append(doctype_categ[0]) lnameCateg.append(doctype_categ[1]) ## Now get the details of the document type: doctype_details = get_doctype_details(doctype) if doctype_details is None: ## Doctype doesn't exist - raise error: return warningMsg(_("Unable to find document type: %s") % escape(str(doctype)), req, c, ln) else: docFullDesc = doctype_details[0] # Also update the doctype as returned by the database, since # it might have a differnent case (eg. 
DemOJrN->demoJRN) doctype = docShortDesc = doctype_details[1] description = doctype_details[4] ## Get the details of the actions supported by this document-type: doctype_actions = get_actions_on_submission_page_for_doctype(doctype) for doctype_action in doctype_actions: ## Get the details of this action: action_details = get_action_details(doctype_action[0]) if action_details is not None: actionShortDesc.append(doctype_action[0]) indir.append(action_details[1]) actionbutton.append(action_details[4]) statustext.append(action_details[5]) ## Send the gathered information to the template so that the doctype's ## home-page can be displayed: t = websubmit_templates.tmpl_action_page( ln=ln, uid=uid, guest=(uid_email == "" or uid_email == "guest"), pid = os.getpid(), now = time.time(), doctype = doctype, description = description, docfulldesc = docFullDesc, snameCateg = snameCateg, lnameCateg = lnameCateg, actionShortDesc = actionShortDesc, indir = indir, # actionbutton = actionbutton, statustext = statustext, ) p_navtrail = """<a href="/submit?ln=%(ln)s" class="navtrail">%(submit)s</a>""" % {'submit' : _("Submit"), 'ln' : ln} return page(title = docFullDesc, body=t, navtrail=p_navtrail, description="submit documents", keywords="submit", uid=uid, language=ln, req=req, navmenuid='submit' ) def Request_Print(m, txt): """The argumemts to this function are the display mode (m) and the text to be displayed (txt). """ return txt def Evaluate_Parameter (field, doctype): # Returns the literal value of the parameter. Assumes that the value is # uniquely determined by the doctype, i.e. doctype is the primary key in # the table # If the table name is not null, evaluate the parameter ## TODO: The above comment looks like nonesense? This ## function only seems to get the values of parameters ## from the db... ## Get the value for the parameter: param_val = get_parameter_value_for_doctype(doctype, field) if param_val is None: ## Couldn't find a value for this parameter for this doctype. 
        ## Instead, try with the default doctype (DEF):
        param_val = get_parameter_value_for_doctype("DEF", field)
    if param_val is None:
        ## There was no value for the parameter for the default doctype.
        ## Nothing can be done about it - return an empty string:
        return ""
    else:
        ## There was some kind of value for the parameter; return it:
        return param_val

def Get_Parameters (function, doctype):
    """For a given function of a given document type, a dictionary of the
       parameter names and values are returned.
       @param function: (string) - the name of the function for which the
        parameters are to be retrieved.
       @param doctype: (string) - the ID of the document type.
       @return: (dictionary) - of the parameters of the function. Keyed by
        the parameter name, values are of course the parameter values.
    """
    parray = {}
    ## Get the names of the parameters expected by this function:
    func_params = get_parameters_of_function(function)
    for func_param in func_params:
        ## For each of the parameters, get its value for this document-
        ## type and add it into the dictionary of parameters:
        parameter = func_param[0]
        parray[parameter] = Evaluate_Parameter (parameter, doctype)
    return parray

def get_level(doctype, action):
    """Get the level of a given submission. If unknown, return 0 as the
       level.
       @param doctype: (string) - the ID of the document type.
       @param action: (string) - the ID of the action.
       @return: (integer) - the level of the submission; 0 otherwise.
    """
    subm_details = get_details_of_submission(doctype, action)
    if subm_details is not None:
        ## Return the level of this action
        subm_level = subm_details[9]
        try:
            ## Only validate that the value is integer-like; the raw
            ## value is what gets returned.
            int(subm_level)
        except ValueError:
            return 0
        else:
            ## NOTE(review): this returns subm_level as stored (likely a
            ## string from the DB), not int(subm_level), despite the
            ## docstring promising an integer - confirm callers' use.
            return subm_level
    else:
        return 0

def action_details (doctype, action):
    # Prints whether the action is mandatory or optional. The score of the
    # action is returned (-1 if the action was optional)
    subm_details = get_details_of_submission(doctype, action)
    if subm_details is not None:
        if subm_details[9] != "0":
            ## This action is mandatory; return the score:
            return subm_details[10]
        else:
            return -1
    else:
        return -1

def print_function_calls (req, doctype, action, step, form, start_time,
                          access, curdir, dismode, rn, last_step,
                          action_score, ln=CFG_SITE_LANG):
    """Execute, in order, the WebSubmit functions configured for the given
       doctype/action/step, inside a sandboxed globals dictionary, and
       render the (optionally supervisor-mode) report of the calls.
       @return: (tuple) - (rendered output, last_step, action_score) as
        possibly updated by the executed functions.
    """
    # Calls the functions required by an "action" action on a "doctype" document
    # In supervisor mode, a table of the function calls is produced
    user_info = collect_user_info(req)
    # load the right message language
    _ = gettext_set_language(ln)
    t = ""
    ## Here follows the global protect environment.
    the_globals = {
        'doctype' : doctype,
        'action' : action,
        'step' : step,
        'access' : access,
        'ln' : ln,
        'curdir' : curdir,
        'uid' : user_info['uid'],
        'uid_email' : user_info['email'],
        'rn' : rn,
        'last_step' : last_step,
        'action_score' : action_score,
        '__websubmit_in_jail__' : True,
        'form' : form,
        'user_info' : user_info,
        '__builtins__' : globals()['__builtins__'],
        'Request_Print': Request_Print
    }
    ## Get the list of functions to be called
    funcs_to_call = get_functions_for_submission_step(doctype, action, step)

    ## If no functions are found at this step for this doctype,
    ## get the functions for the DEF(ault) doctype:
    if len(funcs_to_call) == 0:
        funcs_to_call = get_functions_for_submission_step("DEF", action, step)
    if len(funcs_to_call) > 0:
        # while there are functions left...
        functions = []
        for function in funcs_to_call:
            try:
                function_name = function[0]
                function_score = function[1]
                currfunction = {
                    'name' : function_name,
                    'score' : function_score,
                    'error' : 0,
                    'text' : '',
                }
                if os.path.exists("%s/invenio/websubmit_functions/%s.py" % (CFG_PYLIBDIR, function_name)):
                    # import the function itself
                    #function = getattr(invenio.websubmit_functions, function_name)
                    ## NOTE(review): execfile/eval of installed function
                    ## modules - safe only because the path is built from
                    ## trusted DB config, never from user input.
                    execfile("%s/invenio/websubmit_functions/%s.py" % (CFG_PYLIBDIR, function_name), the_globals)
                    if function_name not in the_globals:
                        currfunction['error'] = 1
                    else:
                        the_globals['function'] = the_globals[function_name]
                        # Evaluate the parameters, and place them in an array
                        the_globals['parameters'] = Get_Parameters(function_name, doctype)
                        # Call function:
                        log_function(curdir, "Start %s" % function_name, start_time)
                        try:
                            try:
                                ## Attempt to call the function with 4 arguments:
                                ## ("parameters", "curdir" and "form" as usual),
                                ## and "user_info" - the dictionary of user
                                ## information:
                                ##
                                ## Note: The function should always be called with
                                ## these keyword arguments because the "TypeError"
                                ## except clause checks for a specific mention of
                                ## the 'user_info' keyword argument when a legacy
                                ## function (one that accepts only 'parameters',
                                ## 'curdir' and 'form') has been called and if
                                ## the error string doesn't contain this,
                                ## the TypeError will be considered as a something
                                ## that was incorrectly handled in the function and
                                ## will be propagated as an
                                ## InvenioWebSubmitFunctionError instead of the
                                ## function being called again with the legacy 3
                                ## arguments.
                                func_returnval = eval("function(parameters=parameters, curdir=curdir, form=form, user_info=user_info)", the_globals)
                            except TypeError, err:
                                ## If the error contains the string "got an
                                ## unexpected keyword argument", it means that the
                                ## function doesn't accept the "user_info"
                                ## argument. Test for this:
                                if "got an unexpected keyword argument 'user_info'" in \
                                   str(err).lower():
                                    ## As expected, the function doesn't accept
                                    ## the user_info keyword argument. Call it
                                    ## again with the legacy 3 arguments
                                    ## (parameters, curdir, form):
                                    func_returnval = eval("function(parameters=parameters, curdir=curdir, form=form)", the_globals)
                                else:
                                    ## An unexpected "TypeError" was caught.
                                    ## It looks as though the function itself didn't
                                    ## handle something correctly.
                                    ## Convert this error into an
                                    ## InvenioWebSubmitFunctionError and raise it:
                                    msg = "Unhandled TypeError caught when " \
                                          "calling [%s] WebSubmit function: " \
                                          "[%s]" % (function_name, str(err))
                                    raise InvenioWebSubmitFunctionError(msg)
                        except InvenioWebSubmitFunctionWarning, err:
                            ## There was an unexpected behaviour during the
                            ## execution. Log the message into function's log
                            ## and go to next function
                            log_function(curdir, "***Warning*** from %s: %s" \
                                         % (function_name, str(err)), start_time)
                            ## Reset "func_returnval" to None:
                            func_returnval = None
                            register_exception(req=req, alert_admin=True, prefix="Warning in executing function %s with globals %s" % (pprint.pformat(currfunction), pprint.pformat(the_globals)))
                        log_function(curdir, "End %s" % function_name, start_time)
                        if func_returnval is not None:
                            ## Append the returned value as a string:
                            currfunction['text'] = str(func_returnval)
                        else:
                            ## The function returned the NoneType. Don't keep that
                            ## value as the currfunction->text. Replace it with the
                            ## empty string.
                            currfunction['text'] = ""
                else:
                    ## No module file exists for this function on disk:
                    currfunction['error'] = 1
                functions.append(currfunction)
            except InvenioWebSubmitFunctionStop, err:
                ## The submission asked to stop execution. This is
                ## ok. Do not alert admin, and raise exception further
                log_function(curdir, "***Stop*** from %s: %s" \
                             % (function_name, str(err)), start_time)
                raise
            except:
                ## Any other failure is a genuine error: log it for the
                ## admin and propagate.
                register_exception(req=req, alert_admin=True, prefix="Error in executing function %s with globals %s" % (pprint.pformat(currfunction), pprint.pformat(the_globals)))
                raise

        t = websubmit_templates.tmpl_function_output(
              ln = ln,
              display_on = (dismode == 'S'),
              action = action,
              doctype = doctype,
              step = step,
              functions = functions,
            )
    else :
        if dismode == 'S':
            t = "<br /><br /><b>" + _("The chosen action is not supported by the document type.") + "</b>"
    return (t, the_globals['last_step'], the_globals['action_score'])

def Propose_Next_Action (doctype, action_score, access, currentlevel, indir, ln=CFG_SITE_LANG):
    """Render the list of next submission actions to propose to the user:
       all submissions of this doctype at the current level whose score is
       above the just-completed action's score (only those sharing the
       first/lowest such score are proposed).
       @return: (string) - the rendered "next action" HTML, or "" if none.
    """
    t = ""
    next_submissions = \
        get_submissions_at_level_X_with_score_above_N(doctype, currentlevel, action_score)
    if len(next_submissions) > 0:
        actions = []
        first_score = next_submissions[0][10]
        for action in next_submissions:
            if action[10] == first_score:
                ## Get the submission directory of this action:
                nextdir = get_storage_directory_of_action(action[1])
                if nextdir is None:
                    nextdir = ""
                curraction = {
                  'page' : action[11],
                  'action' : action[1],
                  'doctype' : doctype,
                  'nextdir' : nextdir,
                  'access' : access,
                  'indir' : indir,
                  'name' : action[12],
                }
                actions.append(curraction)
        t = websubmit_templates.tmpl_next_action(
              ln = ln,
              actions = actions,
            )
    return t

def specialchars(text):
    """Replace Windows-1252 numeric character references by plain ASCII:
       &#147;/&#148; -> double quote, &#146; -> apostrophe,
       &#151; -> hyphen, &#133; -> three dots (ellipsis).
    """
    text = string.replace(text, "&#147;", "\042");
    text = string.replace(text, "&#148;", "\042");
    text = string.replace(text, "&#146;", "\047");
    text = string.replace(text, "&#151;", "\055");
    text = string.replace(text, "&#133;", "\056\056\056");
    return text

def log_function(curdir, message, start_time, filename="function_log"):
    """Write into file the message and the difference of time
    between starttime and current time
    @param curdir:(string) path to the destination dir
    @param message: (string) message to write into the file
    @param starttime: (float)
time to compute from @param filname: (string) name of log file """ time_lap = "%.3f" % (time.time() - start_time) if os.access(curdir, os.F_OK|os.W_OK): fd = open("%s/%s" % (curdir, filename), "a+") fd.write("""%s --- %s\n""" % (message, time_lap)) fd.close() ## FIXME: Duplicated def errorMsg(title, req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG): # load the right message language _ = gettext_set_language(ln) return page(title = _("Error"), body = create_error_box(req, title=title, verbose=0, ln=ln), description="%s - Internal Error" % c, keywords="%s, Internal Error" % c, uid = getUid(req), language=ln, req=req, navmenuid='submit') def warningMsg(title, req, c=CFG_SITE_NAME, ln=CFG_SITE_LANG): # load the right message language _ = gettext_set_language(ln) return page(title = _("Warning"), body = title, description="%s - Warning" % c, keywords="%s, Warning" % c, uid = getUid(req), language=ln, req=req, navmenuid='submit')
gpl-2.0
dimaspivak/docker-py
docker/models/images.py
3
9972
import re import six from ..api import APIClient from ..errors import BuildError from ..utils.json_stream import json_stream from .resource import Collection, Model class Image(Model): """ An image on the server. """ def __repr__(self): return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags)) @property def labels(self): """ The labels of an image as dictionary. """ result = self.attrs['Config'].get('Labels') return result or {} @property def short_id(self): """ The ID of the image truncated to 10 characters, plus the ``sha256:`` prefix. """ if self.id.startswith('sha256:'): return self.id[:17] return self.id[:10] @property def tags(self): """ The image's tags. """ tags = self.attrs.get('RepoTags') if tags is None: tags = [] return [tag for tag in tags if tag != '<none>:<none>'] def history(self): """ Show the history of an image. Returns: (str): The history of the image. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ return self.client.api.history(self.id) def save(self): """ Get a tarball of an image. Similar to the ``docker save`` command. Returns: (urllib3.response.HTTPResponse object): The response from the daemon. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> image = cli.images.get("fedora:latest") >>> resp = image.save() >>> f = open('/tmp/fedora-latest.tar', 'w') >>> for chunk in resp.stream(): >>> f.write(chunk) >>> f.close() """ return self.client.api.get_image(self.id) def tag(self, repository, tag=None, **kwargs): """ Tag this image into a repository. Similar to the ``docker tag`` command. Args: repository (str): The repository to set for the tag tag (str): The tag name force (bool): Force Raises: :py:class:`docker.errors.APIError` If the server returns an error. 
Returns: (bool): ``True`` if successful """ return self.client.api.tag(self.id, repository, tag=tag, **kwargs) class ImageCollection(Collection): model = Image def build(self, **kwargs): """ Build an image and return it. Similar to the ``docker build`` command. Either ``path`` or ``fileobj`` must be set. If you have a tar file for the Docker build context (including a Dockerfile) already, pass a readable file-like object to ``fileobj`` and also pass ``custom_context=True``. If the stream is compressed also, set ``encoding`` to the correct value (e.g ``gzip``). If you want to get the raw output of the build, use the :py:meth:`~docker.api.build.BuildApiMixin.build` method in the low-level API. Args: path (str): Path to the directory containing the Dockerfile fileobj: A file object to use as the Dockerfile. (Or a file-like object) tag (str): A tag to add to the final image quiet (bool): Whether to return the status nocache (bool): Don't use the cache when set to ``True`` rm (bool): Remove intermediate containers. The ``docker build`` command now defaults to ``--rm=true``, but we have kept the old default of `False` to preserve backward compatibility timeout (int): HTTP timeout custom_context (bool): Optional if using ``fileobj`` encoding (str): The encoding for a stream. Set to ``gzip`` for compressing pull (bool): Downloads any updates to the FROM image in Dockerfiles forcerm (bool): Always remove intermediate containers, even after unsuccessful builds dockerfile (str): path within the build context to the Dockerfile buildargs (dict): A dictionary of build arguments container_limits (dict): A dictionary of limits applied to each container created by the build process. Valid keys: - memory (int): set memory limit for build - memswap (int): Total memory (memory + swap), -1 to disable swap - cpushares (int): CPU shares (relative weight) - cpusetcpus (str): CPUs in which to allow execution, e.g., ``"0-3"``, ``"0,1"`` shmsize (int): Size of `/dev/shm` in bytes. 
The size must be greater than 0. If omitted the system uses 64MB labels (dict): A dictionary of labels to set on the image cache_from (list): A list of images used for build cache resolution target (str): Name of the build-stage to build in a multi-stage Dockerfile network_mode (str): networking mode for the run commands during build Returns: (:py:class:`Image`): The built image. Raises: :py:class:`docker.errors.BuildError` If there is an error during the build. :py:class:`docker.errors.APIError` If the server returns any other error. ``TypeError`` If neither ``path`` nor ``fileobj`` is specified. """ resp = self.client.api.build(**kwargs) if isinstance(resp, six.string_types): return self.get(resp) last_event = None image_id = None for chunk in json_stream(resp): if 'error' in chunk: raise BuildError(chunk['error']) if 'stream' in chunk: match = re.search( r'(^Successfully built |sha256:)([0-9a-f]+)$', chunk['stream'] ) if match: image_id = match.group(2) last_event = chunk if image_id: return self.get(image_id) raise BuildError(last_event or 'Unknown') def get(self, name): """ Gets an image. Args: name (str): The name of the image. Returns: (:py:class:`Image`): The image. Raises: :py:class:`docker.errors.ImageNotFound` If the image does not exist. :py:class:`docker.errors.APIError` If the server returns an error. """ return self.prepare_model(self.client.api.inspect_image(name)) def list(self, name=None, all=False, filters=None): """ List images on the server. Args: name (str): Only show images belonging to the repository ``name`` all (bool): Show intermediate image layers. By default, these are filtered out. filters (dict): Filters to be processed on the image list. Available filters: - ``dangling`` (bool) - ``label`` (str): format either ``key`` or ``key=value`` Returns: (list of :py:class:`Image`): The images. Raises: :py:class:`docker.errors.APIError` If the server returns an error. 
""" resp = self.client.api.images(name=name, all=all, filters=filters) return [self.prepare_model(r) for r in resp] def load(self, data): """ Load an image that was previously saved using :py:meth:`~docker.models.images.Image.save` (or ``docker save``). Similar to ``docker load``. Args: data (binary): Image data to be loaded. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ return self.client.api.load_image(data) def pull(self, name, tag=None, **kwargs): """ Pull an image of the given name and return it. Similar to the ``docker pull`` command. If you want to get the raw pull output, use the :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the low-level API. Args: repository (str): The repository to pull tag (str): The tag to pull insecure_registry (bool): Use an insecure registry auth_config (dict): Override the credentials that :py:meth:`~docker.client.DockerClient.login` has set for this request. ``auth_config`` should contain the ``username`` and ``password`` keys to be valid. Returns: (:py:class:`Image`): The image that has been pulled. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> image = client.images.pull('busybox') """ self.client.api.pull(name, tag=tag, **kwargs) return self.get('{0}:{1}'.format(name, tag) if tag else name) def push(self, repository, tag=None, **kwargs): return self.client.api.push(repository, tag=tag, **kwargs) push.__doc__ = APIClient.push.__doc__ def remove(self, *args, **kwargs): self.client.api.remove_image(*args, **kwargs) remove.__doc__ = APIClient.remove_image.__doc__ def search(self, *args, **kwargs): return self.client.api.search(*args, **kwargs) search.__doc__ = APIClient.search.__doc__ def prune(self, filters=None): return self.client.api.prune_images(filters=filters) prune.__doc__ = APIClient.prune_images.__doc__
apache-2.0
ntt-sic/neutron
neutron/plugins/ml2/drivers/l2pop/mech_driver.py
6
8782
# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Sylvain Afchain, eNovance SAS # @author: Francois Eleouet, Orange # @author: Mathieu Rohon, Orange from oslo.config import cfg from neutron.common import constants as const from neutron import context as n_context from neutron.db import api as db_api from neutron.openstack.common import log as logging from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.l2pop import config # noqa from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc LOG = logging.getLogger(__name__) class L2populationMechanismDriver(api.MechanismDriver, l2pop_db.L2populationDbMixin): def initialize(self): LOG.debug(_("Experimental L2 population driver")) self.rpc_ctx = n_context.get_admin_context_without_session() def _get_port_fdb_entries(self, port): return [[port['mac_address'], ip['ip_address']] for ip in port['fixed_ips']] def delete_port_precommit(self, context): self.remove_fdb_entries = self._update_port_down(context) def delete_port_postcommit(self, context): l2pop_rpc.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, self.remove_fdb_entries) def _get_diff_ips(self, orig, port): orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) # check if an ip has been added or removed orig_chg_ips = 
orig_ips.difference(port_ips) port_chg_ips = port_ips.difference(orig_ips) if orig_chg_ips or port_chg_ips: return orig_chg_ips, port_chg_ips def _fixed_ips_changed(self, context, orig, port): diff_ips = self._get_diff_ips(orig, port) if not diff_ips: return orig_ips, port_ips = diff_ips port_infos = self._get_port_infos(context, orig) if not port_infos: return agent, agent_ip, segment, port_fdb_entries = port_infos orig_mac_ip = [[port['mac_address'], ip] for ip in orig_ips] port_mac_ip = [[port['mac_address'], ip] for ip in port_ips] upd_fdb_entries = {port['network_id']: {agent_ip: {}}} ports = upd_fdb_entries[port['network_id']][agent_ip] if orig_mac_ip: ports['before'] = orig_mac_ip if port_mac_ip: ports['after'] = port_mac_ip l2pop_rpc.L2populationAgentNotify.update_fdb_entries( self.rpc_ctx, {'chg_ip': upd_fdb_entries}) return True def update_port_postcommit(self, context): port = context.current orig = context.original if port['status'] == orig['status']: self._fixed_ips_changed(context, orig, port) elif port['status'] == const.PORT_STATUS_ACTIVE: self._update_port_up(context) elif port['status'] == const.PORT_STATUS_DOWN: fdb_entries = self._update_port_down(context) l2pop_rpc.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) def _get_port_infos(self, context, port): agent_host = port['binding:host_id'] if not agent_host: return session = db_api.get_session() agent = self.get_agent_by_host(session, agent_host) if not agent: return agent_ip = self.get_agent_ip(agent) if not agent_ip: LOG.warning(_("Unable to retrieve the agent ip, check the agent " "configuration.")) return segment = context.bound_segment if not segment: LOG.warning(_("Port %(port)s updated by agent %(agent)s " "isn't bound to any segment"), {'port': port['id'], 'agent': agent}) return tunnel_types = self.get_agent_tunnel_types(agent) if segment['network_type'] not in tunnel_types: return fdb_entries = self._get_port_fdb_entries(port) return agent, agent_ip, segment, 
fdb_entries def _update_port_up(self, context): port_context = context.current port_infos = self._get_port_infos(context, port_context) if not port_infos: return agent, agent_ip, segment, port_fdb_entries = port_infos agent_host = port_context['binding:host_id'] network_id = port_context['network_id'] session = db_api.get_session() agent_ports = self.get_agent_network_port_count(session, agent_host, network_id) other_fdb_entries = {network_id: {'segment_id': segment['segmentation_id'], 'network_type': segment['network_type'], 'ports': {agent_ip: []}}} if agent_ports == 1 or ( self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time): # First port plugged on current agent in this network, # we have to provide it with the whole list of fdb entries agent_fdb_entries = {network_id: {'segment_id': segment['segmentation_id'], 'network_type': segment['network_type'], 'ports': {}}} ports = agent_fdb_entries[network_id]['ports'] network_ports = self.get_network_ports(session, network_id) for network_port in network_ports: binding, agent = network_port if agent.host == agent_host: continue ip = self.get_agent_ip(agent) if not ip: LOG.debug(_("Unable to retrieve the agent ip, check " "the agent %(agent_host)s configuration."), {'agent_host': agent.host}) continue agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) agent_ports += self._get_port_fdb_entries(binding.port) ports[ip] = agent_ports # And notify other agents to add flooding entry other_fdb_entries[network_id]['ports'][agent_ip].append( const.FLOODING_ENTRY) if ports.keys(): l2pop_rpc.L2populationAgentNotify.add_fdb_entries( self.rpc_ctx, agent_fdb_entries, agent_host) # Notify other agents to add fdb rule for current port other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries l2pop_rpc.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, other_fdb_entries) def _update_port_down(self, context): port_context = context.current port_infos = self._get_port_infos(context, port_context) if not 
port_infos: return agent, agent_ip, segment, port_fdb_entries = port_infos agent_host = port_context['binding:host_id'] network_id = port_context['network_id'] session = db_api.get_session() agent_ports = self.get_agent_network_port_count(session, agent_host, network_id) other_fdb_entries = {network_id: {'segment_id': segment['segmentation_id'], 'network_type': segment['network_type'], 'ports': {agent_ip: []}}} if agent_ports == 1: # Agent is removing its last port in this network, # other agents needs to be notified to delete their flooding entry. other_fdb_entries[network_id]['ports'][agent_ip].append( const.FLOODING_ENTRY) # Notify other agents to remove fdb rule for current port fdb_entries = self._get_port_fdb_entries(port_context) other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries return other_fdb_entries
apache-2.0
bowen0701/algorithms_data_structures
lc0314_binary_tree_vertical_order_traversal.py
1
3585
"""Leetcode 314. Binary Tree Vertical Order Traversal Medium URL: https://leetcode.com/problems/binary-tree-vertical-order-traversal/ Given a binary tree, return the vertical order traversal of its nodes' values. (ie, from top to bottom, column by column). If two nodes are in the same row and column, the order should be from left to right. Examples 1: Input: [3,9,20,null,null,15,7] 3 /\ / \ 9 20 /\ / \ 15 7 Output: [ [9], [3,15], [20], [7] ] Examples 2: Input: [3,9,8,4,0,1,7] 3 /\ / \ 9 8 /\ /\ / \/ \ 4 01 7 Output: [ [4], [9], [3,0,1], [8], [7] ] Examples 3: Input: [3,9,8,4,0,1,7,null,null,null,2,5] (0's right child is 2 and 1's left child is 5) 3 /\ / \ 9 8 /\ /\ / \/ \ 4 01 7 /\ / \ 5 2 Output: [ [4], [9,5], [3,0,1], [8,2], [7] ] """ # Definition for a binary tree node. class TreeNode(object): def __init__(self, val): self.val = val self.left = None self.right = None class SolutionOrderValsDictQueue(object): def verticalOrder(self, root): """ :type root: TreeNode :rtype: List[List[int]] Time complexity: O(n). Space complexity: O(n). """ from collections import defaultdict from collections import deque if not root: return [] # Create dict: vertical order->list(node values). vorder_vals_d = defaultdict(list) # Use queue to add root and left/right with their orders to dict. queue = deque([(root, 0)]) while queue: current, vorder = queue.pop() vorder_vals_d[vorder].append(current.val) if current.left: queue.appendleft((current.left, vorder - 1)) if current.right: queue.appendleft((current.right, vorder + 1)) # Return sorted list(node values) based on vertical order. 
vorder_vals = [vals for vorder, vals in sorted(vorder_vals_d.items())] return vorder_vals def main(): # 3 # /\ # / \ # 9 20 # /\ # / \ # 15 7 # Output: # [ # [9], # [3,15], # [20], # [7] # ] root = TreeNode(3) root.left = TreeNode(9) root.right = TreeNode(20) root.right.left = TreeNode(15) root.right.right = TreeNode(17) print SolutionOrderValsDictQueue().verticalOrder(root) # 3 # /\ # / \ # 9 8 # /\ /\ # / \/ \ # 4 01 7 # Output: # [ # [4], # [9], # [3,0,1], # [8], # [7] # ] root = TreeNode(3) root.left = TreeNode(9) root.right = TreeNode(8) root.left.left = TreeNode(4) root.left.right = TreeNode(0) root.right.left = TreeNode(1) root.right.right = TreeNode(7) print SolutionOrderValsDictQueue().verticalOrder(root) # 3 # /\ # / \ # 9 8 # /\ /\ # / \/ \ # 4 01 7 # /\ # / \ # 5 2 # Output: # [ # [4], # [9,5], # [3,0,1], # [8,2], # [7] # ] root = TreeNode(3) root.left = TreeNode(9) root.right = TreeNode(8) root.left.left = TreeNode(4) root.left.right = TreeNode(0) root.right.left = TreeNode(1) root.right.right = TreeNode(7) root.left.right.right = TreeNode(2) root.right.left.left = TreeNode(5) print SolutionOrderValsDictQueue().verticalOrder(root) if __name__ == '__main__': main()
bsd-2-clause
DirectXMan12/nova-hacking
nova/tests/db/test_migration_utils.py
1
21712
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me). # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings from migrate.changeset import UniqueConstraint from sqlalchemy.dialects import mysql from sqlalchemy import Boolean, Index, Integer, DateTime, String from sqlalchemy import MetaData, Table, Column from sqlalchemy.engine import reflection from sqlalchemy.exc import NoSuchTableError from sqlalchemy.exc import SAWarning from sqlalchemy.sql import select from sqlalchemy.types import UserDefinedType, NullType from nova.db.sqlalchemy import api as db from nova.db.sqlalchemy import utils from nova import exception from nova.tests.db import test_migrations class CustomType(UserDefinedType): """Dummy column type for testing unsupported types.""" def get_col_spec(self): return "CustomType" class TestMigrationUtils(test_migrations.BaseMigrationTestCase): """Class for testing utils that are used in db migrations.""" def test_utils_drop_unique_constraint(self): table_name = "__test_tmp_table__" uc_name = 'uniq_foo' values = [ {'id': 1, 'a': 3, 'foo': 10}, {'id': 2, 'a': 2, 'foo': 20}, {'id': 3, 'a': 1, 'foo': 30} ] for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine test_table = Table(table_name, meta, Column('id', Integer, primary_key=True, nullable=False), Column('a', Integer), Column('foo', Integer), UniqueConstraint('a', name='uniq_a'), UniqueConstraint('foo', name=uc_name)) 
test_table.create() engine.execute(test_table.insert(), values) # NOTE(boris-42): This method is generic UC dropper. utils.drop_unique_constraint(engine, table_name, uc_name, 'foo') s = test_table.select().order_by(test_table.c.id) rows = engine.execute(s).fetchall() for i in xrange(0, len(values)): v = values[i] self.assertEqual((v['id'], v['a'], v['foo']), rows[i]) # NOTE(boris-42): Update data about Table from DB. meta = MetaData() meta.bind = engine test_table = Table(table_name, meta, autoload=True) constraints = filter(lambda c: c.name == uc_name, test_table.constraints) self.assertEqual(len(constraints), 0) self.assertEqual(len(test_table.constraints), 1) test_table.drop() def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self): table_name = "__test_tmp_table__" uc_name = 'uniq_foo' values = [ {'id': 1, 'a': 3, 'foo': 10}, {'id': 2, 'a': 2, 'foo': 20}, {'id': 3, 'a': 1, 'foo': 30} ] engine = self.engines['sqlite'] meta = MetaData(bind=engine) test_table = Table(table_name, meta, Column('id', Integer, primary_key=True, nullable=False), Column('a', Integer), Column('foo', CustomType, default=0), UniqueConstraint('a', name='uniq_a'), UniqueConstraint('foo', name=uc_name)) test_table.create() engine.execute(test_table.insert(), values) warnings.simplefilter("ignore", SAWarning) # NOTE(boris-42): Missing info about column `foo` that has # unsupported type CustomType. self.assertRaises(exception.NovaException, utils.drop_unique_constraint, engine, table_name, uc_name, 'foo') # NOTE(boris-42): Wrong type of foo instance. it should be # instance of sqlalchemy.Column. 
self.assertRaises(exception.NovaException, utils.drop_unique_constraint, engine, table_name, uc_name, 'foo', foo=Integer()) foo = Column('foo', CustomType, default=0) utils.drop_unique_constraint(engine, table_name, uc_name, 'foo', foo=foo) s = test_table.select().order_by(test_table.c.id) rows = engine.execute(s).fetchall() for i in xrange(0, len(values)): v = values[i] self.assertEqual((v['id'], v['a'], v['foo']), rows[i]) # NOTE(boris-42): Update data about Table from DB. meta = MetaData(bind=engine) test_table = Table(table_name, meta, autoload=True) constraints = filter(lambda c: c.name == uc_name, test_table.constraints) self.assertEqual(len(constraints), 0) self.assertEqual(len(test_table.constraints), 1) test_table.drop() def _populate_db_for_drop_duplicate_entries(self, engine, meta, table_name): values = [ {'id': 11, 'a': 3, 'b': 10, 'c': 'abcdef'}, {'id': 12, 'a': 5, 'b': 10, 'c': 'abcdef'}, {'id': 13, 'a': 6, 'b': 10, 'c': 'abcdef'}, {'id': 14, 'a': 7, 'b': 10, 'c': 'abcdef'}, {'id': 21, 'a': 1, 'b': 20, 'c': 'aa'}, {'id': 31, 'a': 1, 'b': 20, 'c': 'bb'}, {'id': 41, 'a': 1, 'b': 30, 'c': 'aef'}, {'id': 42, 'a': 2, 'b': 30, 'c': 'aef'}, {'id': 43, 'a': 3, 'b': 30, 'c': 'aef'} ] test_table = Table(table_name, meta, Column('id', Integer, primary_key=True, nullable=False), Column('a', Integer), Column('b', Integer), Column('c', String(255)), Column('deleted', Integer, default=0), Column('deleted_at', DateTime), Column('updated_at', DateTime)) test_table.create() engine.execute(test_table.insert(), values) return test_table, values def test_drop_old_duplicate_entries_from_table(self): table_name = "__test_tmp_table__" for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine test_table, values = self.\ _populate_db_for_drop_duplicate_entries(engine, meta, table_name) utils.drop_old_duplicate_entries_from_table(engine, table_name, False, 'b', 'c') uniq_values = set() expected_ids = [] for value in sorted(values, key=lambda x: x['id'], 
reverse=True): uniq_value = (('b', value['b']), ('c', value['c'])) if uniq_value in uniq_values: continue uniq_values.add(uniq_value) expected_ids.append(value['id']) real_ids = [row[0] for row in engine.execute(select([test_table.c.id])).fetchall()] self.assertEqual(len(real_ids), len(expected_ids)) for id_ in expected_ids: self.assertTrue(id_ in real_ids) def test_drop_old_duplicate_entries_from_table_soft_delete(self): table_name = "__test_tmp_table__" for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table, values = self.\ _populate_db_for_drop_duplicate_entries(engine, meta, table_name) utils.drop_old_duplicate_entries_from_table(engine, table_name, True, 'b', 'c') uniq_values = set() expected_values = [] soft_deleted_values = [] for value in sorted(values, key=lambda x: x['id'], reverse=True): uniq_value = (('b', value['b']), ('c', value['c'])) if uniq_value in uniq_values: soft_deleted_values.append(value) continue uniq_values.add(uniq_value) expected_values.append(value) base_select = table.select() rows_select = base_select.\ where(table.c.deleted != table.c.id) row_ids = [row['id'] for row in engine.execute(rows_select).fetchall()] self.assertEqual(len(row_ids), len(expected_values)) for value in expected_values: self.assertTrue(value['id'] in row_ids) deleted_rows_select = base_select.\ where(table.c.deleted == table.c.id) deleted_rows_ids = [row['id'] for row in engine.execute(deleted_rows_select).fetchall()] self.assertEqual(len(deleted_rows_ids), len(values) - len(row_ids)) for value in soft_deleted_values: self.assertTrue(value['id'] in deleted_rows_ids) def test_check_shadow_table(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer), Column('c', String(256))) table.create() #check missing shadow table self.assertRaises(NoSuchTableError, utils.check_shadow_table, engine, 
table_name) shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, Column('id', Integer), Column('a', Integer)) shadow_table.create() # check missing column self.assertRaises(exception.NovaException, utils.check_shadow_table, engine, table_name) # check when all is ok c = Column('c', String(256)) shadow_table.create_column(c) self.assertTrue(utils.check_shadow_table(engine, table_name)) # check extra column d = Column('d', Integer) shadow_table.create_column(d) self.assertRaises(exception.NovaException, utils.check_shadow_table, engine, table_name) def test_check_shadow_table_different_types(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer)) table.create() shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, Column('id', Integer, primary_key=True), Column('a', String(256))) shadow_table.create() self.assertRaises(exception.NovaException, utils.check_shadow_table, engine, table_name) def test_check_shadow_table_with_unsupported_type(self): table_name = 'abc' engine = self.engines['sqlite'] meta = MetaData(bind=engine) table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer), Column('c', CustomType)) table.create() shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer), Column('c', CustomType)) shadow_table.create() self.assertTrue(utils.check_shadow_table(engine, table_name)) def test_create_shadow_table_by_table_instance(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer), Column('b', String(256))) table.create() utils.create_shadow_table(engine, table=table) self.assertTrue(utils.check_shadow_table(engine, table_name)) def 
test_create_shadow_table_by_name(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer), Column('b', String(256))) table.create() utils.create_shadow_table(engine, table_name=table_name) self.assertTrue(utils.check_shadow_table(engine, table_name)) def test_create_shadow_table_not_supported_type(self): table_name = 'abc' engine = self.engines['sqlite'] meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', CustomType)) table.create() self.assertRaises(exception.NovaException, utils.create_shadow_table, engine, table_name=table_name) utils.create_shadow_table(engine, table_name=table_name, a=Column('a', CustomType())) self.assertTrue(utils.check_shadow_table(engine, table_name)) def test_create_shadow_both_table_and_table_name_are_none(self): for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine self.assertRaises(exception.NovaException, utils.create_shadow_table, engine) def test_create_shadow_both_table_and_table_name_are_specified(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer)) table.create() self.assertRaises(exception.NovaException, utils.create_shadow_table, engine, table=table, table_name=table_name) def test_create_duplicate_shadow_table(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', Integer)) table.create() utils.create_shadow_table(engine, table_name=table_name) self.assertRaises(exception.ShadowTableExists, utils.create_shadow_table, engine, table_name=table_name) def test_change_deleted_column_type_doesnt_drop_index(self): table_name = 
'abc' for key, engine in self.engines.items(): meta = MetaData(bind=engine) indexes = { 'idx_a_deleted': ['a', 'deleted'], 'idx_b_deleted': ['b', 'deleted'], 'idx_a': ['a'] } index_instances = [Index(name, *columns) for name, columns in indexes.iteritems()] table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('a', String(255)), Column('b', String(255)), Column('deleted', Boolean), *index_instances) table.create() utils.change_deleted_column_type_to_id_type(engine, table_name) utils.change_deleted_column_type_to_boolean(engine, table_name) insp = reflection.Inspector.from_engine(engine) real_indexes = insp.get_indexes(table_name) self.assertEqual(len(real_indexes), 3) for index in real_indexes: name = index['name'] self.assertIn(name, indexes) self.assertEqual(set(index['column_names']), set(indexes[name])) def test_change_deleted_column_type_to_id_type_integer(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('deleted', Boolean)) table.create() utils.change_deleted_column_type_to_id_type(engine, table_name) table = utils.get_table(engine, table_name) self.assertTrue(isinstance(table.c.deleted.type, Integer)) def test_change_deleted_column_type_to_id_type_string(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', String(255), primary_key=True), Column('deleted', Boolean)) table.create() utils.change_deleted_column_type_to_id_type(engine, table_name) table = utils.get_table(engine, table_name) self.assertTrue(isinstance(table.c.deleted.type, String)) def test_change_deleted_column_type_to_id_type_custom(self): table_name = 'abc' engine = self.engines['sqlite'] meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('foo', CustomType), Column('deleted', 
Boolean)) table.create() self.assertRaises(exception.NovaException, utils.change_deleted_column_type_to_id_type, engine, table_name) fooColumn = Column('foo', CustomType()) utils.change_deleted_column_type_to_id_type(engine, table_name, foo=fooColumn) table = utils.get_table(engine, table_name) # NOTE(boris-42): There is no way to check has foo type CustomType. # but sqlalchemy will set it to NullType. self.assertTrue(isinstance(table.c.foo.type, NullType)) self.assertTrue(isinstance(table.c.deleted.type, Integer)) def test_change_deleted_column_type_to_boolean(self): table_name = 'abc' for key, engine in self.engines.items(): meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('deleted', Integer)) table.create() utils.change_deleted_column_type_to_boolean(engine, table_name) table = utils.get_table(engine, table_name) expected_type = Boolean if key != "mysql" else mysql.TINYINT self.assertTrue(isinstance(table.c.deleted.type, expected_type)) def test_change_deleted_column_type_to_boolean_type_custom(self): table_name = 'abc' engine = self.engines['sqlite'] meta = MetaData() meta.bind = engine table = Table(table_name, meta, Column('id', Integer, primary_key=True), Column('foo', CustomType), Column('deleted', Integer)) table.create() self.assertRaises(exception.NovaException, utils.change_deleted_column_type_to_boolean, engine, table_name) fooColumn = Column('foo', CustomType()) utils.change_deleted_column_type_to_boolean(engine, table_name, foo=fooColumn) table = utils.get_table(engine, table_name) # NOTE(boris-42): There is no way to check has foo type CustomType. # but sqlalchemy will set it to NullType. self.assertTrue(isinstance(table.c.foo.type, NullType)) self.assertTrue(isinstance(table.c.deleted.type, Boolean))
apache-2.0
twm/django-yarr
yarr/migrations/0001_initial.py
2
8454
# -*- coding: utf-8 -*-
"""Initial South schema migration for the yarr app.

Creates the ``yarr_feed`` and ``yarr_entry`` tables. Auto-generated by
``schemamigration``; the ``models`` dict below is South's frozen ORM
snapshot and must match the generated field definitions exactly.
"""
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        """Apply the migration: create the Feed and Entry tables."""
        # Adding model 'Feed'
        db.create_table('yarr_feed', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('feed_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('site_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            # Scheduling fields are nullable: a feed may never have been polled.
            ('check_frequency', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('last_checked', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('next_check', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('error', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ))
        db.send_create_signal('yarr', ['Feed'])

        # Adding model 'Entry'
        db.create_table('yarr_entry', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('feed', self.gf('django.db.models.fields.related.ForeignKey')(related_name='entries', to=orm['yarr.Feed'])),
            ('read', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('saved', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('content', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('date', self.gf('django.db.models.fields.DateTimeField')()),
            ('author', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('comments_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('guid', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ))
        db.send_create_signal('yarr', ['Entry'])

    def backwards(self, orm):
        """Reverse the migration: drop both tables (data is lost)."""
        # Deleting model 'Feed'
        db.delete_table('yarr_feed')

        # Deleting model 'Entry'
        db.delete_table('yarr_entry')

    # Frozen ORM state generated by South -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'yarr.entry': {
            'Meta': {'ordering': "('-date',)", 'object_name': 'Entry'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'comments_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': "orm['yarr.Feed']"}),
            'guid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'saved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'yarr.feed': {
            'Meta': {'ordering': "('title', 'added')", 'object_name': 'Feed'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'check_frequency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'next_check': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'site_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['yarr']
bsd-3-clause
Aristocles/CouchPotatoServer
libs/oauthlib/oauth1/rfc5849/parameters.py
186
4817
# -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
oauthlib.parameters
~~~~~~~~~~~~~~~~~~~

This module contains methods related to `section 3.5`_ of the OAuth 1.0a spec.

.. _`section 3.5`: http://tools.ietf.org/html/rfc5849#section-3.5
"""

from urlparse import urlparse, urlunparse
from . import utils
from oauthlib.common import extract_params, urlencode

# TODO: do we need filter_params now that oauth_params are handled by Request?
# We can easily pass in just oauth protocol params.


@utils.filter_params
def prepare_headers(oauth_params, headers=None, realm=None):
    """Render *oauth_params* as an HTTP "Authorization" header value.

    Implements `section 3.5.1`_ of the spec: protocol parameters are
    transmitted in the "Authorization" header with the auth-scheme name
    "OAuth", e.g.::

        Authorization: OAuth realm="Example",
            oauth_consumer_key="0685bd9184jfhq22",
            oauth_signature_method="HMAC-SHA1",
            ...

    Returns a new dict containing *headers* plus the Authorization entry;
    the caller's *headers* dict is never mutated.

    .. _`section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
    """
    # Copy so the input mapping is left untouched.
    full_headers = dict(headers or {})

    # Each protocol parameter becomes name="value"; both halves are
    # percent-escaped per section 3.6 of the spec.
    escaped_pairs = [
        u'{0}="{1}"'.format(utils.escape(name), utils.escape(value))
        for name, value in oauth_params
    ]

    # Parameters are joined with "," (optional whitespace allowed by RFC2617).
    header_value = ', '.join(escaped_pairs)

    # The optional realm parameter leads the list and is deliberately NOT
    # escaped (RFC2617 section 1.2 semantics).
    if realm:
        header_value = u'realm="%s", ' % realm + header_value

    # The auth-scheme name is "OAuth" (case insensitive).
    full_headers[u'Authorization'] = u'OAuth %s' % header_value
    return full_headers


def _append_params(oauth_params, params):
    """Append OAuth protocol params after request-specific *params*.

    Both arguments are sequences of 2-tuples. The stable sort keeps the
    relative order within each group while moving every ``oauth_``-prefixed
    parameter to the end, as sections 3.5.2 / 3.5.3 recommend.
    """
    combined = list(params) + list(oauth_params)
    combined.sort(key=lambda item: item[0].startswith('oauth_'))
    return combined


def prepare_form_encoded_body(oauth_params, body):
    """Prepare the Form-Encoded Body per `section 3.5.2`_ of the spec.

    .. _`section 3.5.2`: http://tools.ietf.org/html/rfc5849#section-3.5.2
    """
    # The body is already a parameter list; just append the OAuth params.
    return _append_params(oauth_params, body)


def prepare_request_uri_query(oauth_params, uri):
    """Prepare the Request URI Query per `section 3.5.3`_ of the spec.

    .. _`section 3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3
    """
    # Merge the OAuth params into whatever query components already exist.
    scheme, netloc, path, params, query, fragment = urlparse(uri)
    existing = extract_params(query) or []
    new_query = urlencode(_append_params(oauth_params, existing))
    return urlunparse((scheme, netloc, path, params, new_query, fragment))
gpl-3.0
JoshuaSBrown/langmuir
LangmuirPython/misc/create_rst.py
2
1726
# -*- coding: utf-8 -*-
"""
create_rst.py
=============

.. argparse::
    :module: create_rst
    :func: create_parser
    :prog: create_rst.py

.. moduleauthor:: Adam Gagorik <adam.gagorik@gmail.com>
"""
import argparse
import os

desc = """
Create rst files from all .py files found in path.
"""


def create_parser():
    """Build the command line parser (also used by sphinx-argparse)."""
    parser = argparse.ArgumentParser()
    parser.description = desc
    parser.add_argument(dest='ipath', nargs='?', default=None,
        help='path to search')
    parser.add_argument(dest='opath', nargs='?', default=None,
        help='path to write')
    parser.add_argument('-r', action='store_true', help='search recursivly')
    return parser


def get_arguments(args=None):
    """Parse *args* (default: sys.argv), defaulting both paths to the CWD.

    Returns the argparse namespace with ipath/opath always set.
    """
    opts = create_parser().parse_args(args)
    if opts.ipath is None:
        opts.ipath = os.getcwd()
    if opts.opath is None:
        opts.opath = os.getcwd()
    return opts


def main():
    """Walk ipath for .py files and emit one .rst stub per module into opath."""
    opts = get_arguments()

    # Module paths are recorded relative to the project root (the parent of
    # the directory containing this script).
    sdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

    found = []
    for root, dirs, files in os.walk(opts.ipath):
        for f in files:
            stub, ext = os.path.splitext(f)
            if ext == '.py':
                found.append(os.path.relpath(os.path.join(root, f), sdir))
        if not opts.r:
            # Non-recursive: only the top level of ipath.
            break

    for f in found:
        # Mirror the source layout underneath the output path.
        fdir = os.path.join(opts.opath, os.path.dirname(f))
        if not os.path.exists(fdir):
            os.makedirs(fdir)
        stub, ext = os.path.splitext(f)
        # BUG FIX: the .rst file is written inside opts.opath (previously it
        # was opened relative to the CWD, ignoring opath and the fdir that
        # was just created).
        rst = os.path.join(opts.opath, stub + '.rst')
        with open(rst, 'w') as handle:
            # handle.write instead of the Py2-only "print >>" statement;
            # works identically on Python 2 and 3.
            handle.write('.. automodule:: %s\n' % os.path.basename(stub))
            handle.write('    :members:\n')
        print(os.path.join('scripts', stub))


if __name__ == '__main__':
    main()
gpl-2.0
techsd/namebench
nb_third_party/simplejson/scanner.py
674
2560
"""JSON token scanner """ import re def _import_c_make_scanner(): try: from simplejson._speedups import make_scanner return make_scanner except ImportError: return None c_make_scanner = _import_c_make_scanner() __all__ = ['make_scanner'] NUMBER_RE = re.compile( r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', (re.VERBOSE | re.MULTILINE | re.DOTALL)) def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match encoding = context.encoding strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook object_pairs_hook = context.object_pairs_hook memo = context.memo def _scan_once(string, idx): try: nextchar = string[idx] except IndexError: raise StopIteration if nextchar == '"': return parse_string(string, idx + 1, encoding, strict) elif nextchar == '{': return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook, object_pairs_hook, memo) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise StopIteration def scan_once(string, idx): try: return _scan_once(string, idx) finally: memo.clear() return 
scan_once make_scanner = c_make_scanner or py_make_scanner
apache-2.0
mariopro/youtube-dl
youtube_dl/extractor/appletrailers.py
101
5336
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    int_or_none,
)


class AppleTrailersIE(InfoExtractor):
    """Extractor for Apple movie-trailer playlist pages (trailers.apple.com).

    One page maps to a playlist of trailers/teasers; each playlist entry is
    resolved through the page's itunes.inc include and per-trailer settings
    JSON.
    """

    _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
    _TESTS = [{
        "url": "http://trailers.apple.com/trailers/wb/manofsteel/",
        'info_dict': {
            'id': 'manofsteel',
        },
        "playlist": [
            {
                "md5": "d97a8e575432dbcb81b7c3acb741f8a8",
                "info_dict": {
                    "id": "manofsteel-trailer4",
                    "ext": "mov",
                    "duration": 111,
                    "title": "Trailer 4",
                    "upload_date": "20130523",
                    "uploader_id": "wb",
                },
            },
            {
                "md5": "b8017b7131b721fb4e8d6f49e1df908c",
                "info_dict": {
                    "id": "manofsteel-trailer3",
                    "ext": "mov",
                    "duration": 182,
                    "title": "Trailer 3",
                    "upload_date": "20130417",
                    "uploader_id": "wb",
                },
            },
            {
                "md5": "d0f1e1150989b9924679b441f3404d48",
                "info_dict": {
                    "id": "manofsteel-trailer",
                    "ext": "mov",
                    "duration": 148,
                    "title": "Trailer",
                    "upload_date": "20121212",
                    "uploader_id": "wb",
                },
            },
            {
                "md5": "5fe08795b943eb2e757fa95cb6def1cb",
                "info_dict": {
                    "id": "manofsteel-teaser",
                    "ext": "mov",
                    "duration": 93,
                    "title": "Teaser",
                    "upload_date": "20120721",
                    "uploader_id": "wb",
                },
            },
        ]
    }, {
        'url': 'http://trailers.apple.com/ca/metropole/autrui/',
        'only_matching': True,
    }]

    # Matches the iTunes.playURL('...') calls embedded in onClick attributes.
    _JSON_RE = r'iTunes.playURL\((.*?)\);'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        movie = mobj.group('movie')
        uploader_id = mobj.group('company')

        # The playlist markup lives in a server-side include next to the page.
        playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')

        def fix_html(s):
            # Make the include parseable as XML: strip scripts, self-close
            # <img> tags, and escape quotes inside the playURL JSON.
            s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
            s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
            # The ' in the onClick attributes are not escaped, it couldn't be parsed
            # like: http://trailers.apple.com/trailers/wb/gravity/
            def _clean_json(m):
                return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
            s = re.sub(self._JSON_RE, _clean_json, s)
            # Wrap in a single root element so the XML parser accepts it.
            s = '<html>%s</html>' % s
            return s
        doc = self._download_xml(playlist_url, movie, transform_source=fix_html)

        playlist = []
        for li in doc.findall('./div/ul/li'):
            # Trailer metadata is JSON inside the anchor's onClick handler.
            on_click = li.find('.//a').attrib['onClick']
            trailer_info_json = self._search_regex(self._JSON_RE, on_click, 'trailer info')
            trailer_info = json.loads(trailer_info_json)
            title = trailer_info['title']
            # Slugify the title to build a stable per-trailer video id.
            video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
            thumbnail = li.find('.//img').attrib['src']
            upload_date = trailer_info['posted'].replace('-', '')

            # Runtime comes as "M:SS"; convert to seconds when it matches.
            runtime = trailer_info['runtime']
            m = re.search(r'(?P<minutes>[0-9]+):(?P<seconds>[0-9]{1,2})', runtime)
            duration = None
            if m:
                duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))

            first_url = trailer_info['url']
            # The trailer id is the URL's basename minus its final _suffix.
            trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
            settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
            settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')

            formats = []
            for format in settings['metadata']['sizes']:
                # The src is a file pointing to the real video file
                format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
                formats.append({
                    'url': format_url,
                    'format': format['type'],
                    'width': int_or_none(format['width']),
                    'height': int_or_none(format['height']),
                })

            self._sort_formats(formats)

            playlist.append({
                '_type': 'video',
                'id': video_id,
                'formats': formats,
                'title': title,
                'duration': duration,
                'thumbnail': thumbnail,
                'upload_date': upload_date,
                'uploader_id': uploader_id,
                'http_headers': {
                    # The server rejects non-QuickTime user agents.
                    'User-Agent': 'QuickTime compatible (youtube-dl)',
                },
            })

        return {
            '_type': 'playlist',
            'id': movie,
            'entries': playlist,
        }
unlicense
zhangjunlei26/servo
tests/wpt/css-tests/tools/pywebsocket/src/test/test_mock.py
496
5168
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Tests for mock module."""


import Queue
import threading
import unittest

import set_sys_path  # Update sys.path to locate mod_pywebsocket module.

from test import mock


class MockConnTest(unittest.TestCase):
    """A unittest for MockConn class."""

    def setUp(self):
        # Fixture: two CRLF-terminated lines, a blank line, then an
        # unterminated tail ('HIJK').
        self._conn = mock.MockConn('ABC\r\nDEFG\r\n\r\nHIJK')

    def test_readline(self):
        self.assertEqual('ABC\r\n', self._conn.readline())
        self.assertEqual('DEFG\r\n', self._conn.readline())
        self.assertEqual('\r\n', self._conn.readline())
        self.assertEqual('HIJK', self._conn.readline())
        # Reading past the end yields the empty string, like a real socket.
        self.assertEqual('', self._conn.readline())

    def test_read(self):
        self.assertEqual('ABC\r\nD', self._conn.read(6))
        self.assertEqual('EFG\r\n\r\nHI', self._conn.read(9))
        # A short read returns whatever is left.
        self.assertEqual('JK', self._conn.read(10))
        self.assertEqual('', self._conn.read(10))

    def test_read_and_readline(self):
        # read() and readline() share the same position.
        self.assertEqual('ABC\r\nD', self._conn.read(6))
        self.assertEqual('EFG\r\n', self._conn.readline())
        self.assertEqual('\r\nHIJK', self._conn.read(9))
        self.assertEqual('', self._conn.readline())

    def test_write(self):
        self._conn.write('Hello\r\n')
        self._conn.write('World\r\n')
        # written_data() accumulates everything written so far.
        self.assertEqual('Hello\r\nWorld\r\n', self._conn.written_data())


class MockBlockingConnTest(unittest.TestCase):
    """A unittest for MockBlockingConn class."""

    def test_read(self):
        """Tests that data put to MockBlockingConn by put_bytes method can be
        read from it.
        """

        class LineReader(threading.Thread):
            """A test class that launches a thread, calls readline on the
            specified conn repeatedly and puts the read data to the specified
            queue.
            """

            def __init__(self, conn, queue):
                threading.Thread.__init__(self)
                self._queue = queue
                self._conn = conn
                # Daemonize so a blocked readline cannot hang the test run.
                self.setDaemon(True)
                self.start()

            def run(self):
                while True:
                    data = self._conn.readline()
                    self._queue.put(data)

        conn = mock.MockBlockingConn()
        queue = Queue.Queue()
        # The reader thread starts in its constructor and blocks in readline
        # until put_bytes supplies data below.
        reader = LineReader(conn, queue)
        self.failUnless(queue.empty())

        conn.put_bytes('Foo bar\r\n')
        read = queue.get()
        self.assertEqual('Foo bar\r\n', read)


class MockTableTest(unittest.TestCase):
    """A unittest for MockTable class.

    MockTable lookups are case-insensitive regardless of how the table was
    constructed (dict, list of pairs, or tuple of pairs).
    """

    def test_create_from_dict(self):
        table = mock.MockTable({'Key': 'Value'})
        self.assertEqual('Value', table.get('KEY'))
        self.assertEqual('Value', table['key'])

    def test_create_from_list(self):
        table = mock.MockTable([('Key', 'Value')])
        self.assertEqual('Value', table.get('KEY'))
        self.assertEqual('Value', table['key'])

    def test_create_from_tuple(self):
        table = mock.MockTable((('Key', 'Value'),))
        self.assertEqual('Value', table.get('KEY'))
        self.assertEqual('Value', table['key'])

    def test_set_and_get(self):
        table = mock.MockTable()
        self.assertEqual(None, table.get('Key'))
        table['Key'] = 'Value'
        self.assertEqual('Value', table.get('Key'))
        self.assertEqual('Value', table.get('key'))
        self.assertEqual('Value', table.get('KEY'))
        self.assertEqual('Value', table['Key'])
        self.assertEqual('Value', table['key'])
        self.assertEqual('Value', table['KEY'])


if __name__ == '__main__':
    unittest.main()


# vi:sts=4 sw=4 et
mpl-2.0
JerryLead/spark
examples/src/main/python/ml/lda_example.py
54
1900
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function # $example on$ from pyspark.ml.clustering import LDA # $example off$ from pyspark.sql import SparkSession """ An example demonstrating LDA. Run with: bin/spark-submit examples/src/main/python/ml/lda_example.py """ if __name__ == "__main__": spark = SparkSession \ .builder \ .appName("LDAExample") \ .getOrCreate() # $example on$ # Loads data. dataset = spark.read.format("libsvm").load("data/mllib/sample_lda_libsvm_data.txt") # Trains a LDA model. lda = LDA(k=10, maxIter=10) model = lda.fit(dataset) ll = model.logLikelihood(dataset) lp = model.logPerplexity(dataset) print("The lower bound on the log likelihood of the entire corpus: " + str(ll)) print("The upper bound on perplexity: " + str(lp)) # Describe topics. topics = model.describeTopics(3) print("The topics described by their top-weighted terms:") topics.show(truncate=False) # Shows the result transformed = model.transform(dataset) transformed.show(truncate=False) # $example off$ spark.stop()
apache-2.0
all-of-us/raw-data-repository
rdr_service/alembic/versions/cd009f1475ff_deceased_report_cause_of_death.py
1
1935
"""deceased report cause of death Revision ID: cd009f1475ff Revises: a4ab3afcb460 Create Date: 2020-09-08 11:48:27.208496 """ from alembic import op import sqlalchemy as sa import rdr_service.model.utils from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity from rdr_service.model.base import add_table_history_table, drop_table_history_table from rdr_service.model.code import CodeType from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus # revision identifiers, used by Alembic. revision = 'cd009f1475ff' down_revision = 'a4ab3afcb460' branch_labels = None depends_on = None def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() def upgrade_rdr(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('deceased_report', sa.Column('cause_of_death', sa.String(length=1024), nullable=True)) # ### end Alembic commands ### def downgrade_rdr(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('deceased_report', 'cause_of_death') # ### end Alembic commands ### def upgrade_metrics(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### def downgrade_metrics(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###
bsd-3-clause
malcolmr/vroom
vroom/runner.py
3
5918
"""The Vroom test runner. Does the heavy lifting.""" import sys import vroom import vroom.actions import vroom.args import vroom.buffer import vroom.command import vroom.environment import vroom.output import vroom.shell import vroom.test import vroom.vim from vroom.result import Result # Pylint is not smart enough to notice that all the exceptions here inherit from # vroom.test.Failure, which is a standard Exception. # pylint: disable-msg=nonstandard-exception class Vroom(object): """Executes vroom tests.""" def __init__(self, filename, args): """Creates the vroom test. Args: filename: The name of the file to execute. args: The vroom command line flags. """ self._message_strictness = args.message_strictness self._system_strictness = args.system_strictness self._lineno = None # Whether this vroom instance has left the terminal in an unknown state. self.dirty = False self.env = vroom.environment.Environment(filename, args) self.ResetCommands() def ResetCommands(self): self._running_command = None self._command_queue = [] def GetCommand(self): if not self._command_queue: self.PushCommand(None, None) return self._command_queue[-1] def PushCommand(self, line, delay=None): self._command_queue.append( vroom.command.Command(line, self._lineno, delay or 0, self.env)) def ExecuteCommands(self): if not self._command_queue: return self.env.buffer.Unload() for self._running_command in self._command_queue: result = self._running_command.Execute() if result.IsError() and result.value.IsSignificant(): raise result.value self.ResetCommands() def __call__(self, filehandle): """Runs vroom on a file. Args: filehandle: The open file to run on. Returns: A writer to write the test output later. 
""" lines = list(filehandle) try: self.env.writer.Begin(lines) self.env.vim.Start() self.Run(lines) except vroom.ParseError as e: self.Record(vroom.test.RESULT.ERROR, e) except vroom.test.Failure as e: self.Record(vroom.test.RESULT.FAILED, e) except vroom.vim.Quit as e: # TODO(dbarnett): Revisit this when terminal reset is no longer necessary. if e.is_fatal: raise self.Record(vroom.test.RESULT.ERROR, e) except Exception: self.env.writer.actions.Exception(*sys.exc_info()) finally: if not self.env.args.interactive: if not self.env.vim.Quit(): self.dirty = True self.env.vim.Kill() status = self.env.writer.Status() if status != vroom.output.STATUS.PASS and self.env.args.interactive: self.env.vim.Output(self.env.writer) self.env.vim.process.wait() return self.env.writer def Record(self, result, error=None): """Add context to an error and log it. The current line number is added to the context when possible. Args: result: The log type, should be a member of vroom.test.RESULT error: The exception, if any. """ # Figure out the line where the event happened. if self._running_command and self._running_command.lineno is not None: lineno = self._running_command.lineno elif self._lineno is not None: lineno = self._lineno else: lineno = getattr(error, 'lineno', None) if lineno is not None: self.env.writer.actions.Log(result, lineno, error) else: self.env.writer.actions.Error(result, error) def Test(self, function, *args, **kwargs): self.ExecuteCommands() function(*args, **kwargs) def Run(self, lines): """Runs a vroom file. Args: lines: List of lines in the file. """ actions = list(vroom.actions.Parse(lines)) for (self._lineno, action, line, controls) in actions: if action == vroom.actions.ACTION.PASS: # Line breaks send you back to the top of the buffer. self.env.buffer.Unload() # Line breaks distinguish between consecutive system hijacks. 
self.GetCommand().LineBreak() elif action == vroom.actions.ACTION.TEXT: self.PushCommand('i%s<ESC>' % line, **controls) elif action == vroom.actions.ACTION.COMMAND: self.PushCommand(':%s<CR>' % line, **controls) elif action == vroom.actions.ACTION.INPUT: self.PushCommand(line, **controls) elif action == vroom.actions.ACTION.MESSAGE: self.GetCommand().ExpectMessage(line, **controls) elif action == vroom.actions.ACTION.SYSTEM: self.GetCommand().ExpectSyscall(line, **controls) elif action == vroom.actions.ACTION.HIJACK: self.GetCommand().RespondToSyscall(line, **controls) elif action == vroom.actions.ACTION.DIRECTIVE: if line == vroom.actions.DIRECTIVE.CLEAR: self.ExecuteCommands() self.env.writer.actions.Log(vroom.test.RESULT.PASSED, self._lineno) self.env.vim.Clear() elif line == vroom.actions.DIRECTIVE.END: self.Test(self.env.buffer.EnsureAtEnd, **controls) elif line == vroom.actions.DIRECTIVE.MESSAGES: self.ExecuteCommands() strictness = controls.get('messages') or self._message_strictness self.env.message_strictness = strictness elif line == vroom.actions.DIRECTIVE.SYSTEM: self.ExecuteCommands() strictness = controls.get('system') or self._system_strictness self.env.system_strictness = strictness else: raise vroom.ConfigurationError('Unrecognized directive "%s"' % line) elif action == vroom.actions.ACTION.OUTPUT: self.Test(self.env.buffer.Verify, line, **controls) else: raise vroom.ConfigurationError('Unrecognized action "%s"' % action) self.ExecuteCommands() self.env.writer.actions.Log(vroom.test.RESULT.PASSED, self._lineno or 0) self.env.vim.Quit()
apache-2.0
ubic135/odoo-design
addons/website_event/models/event.py
6
3773
# -*- coding: utf-8 -*-

from openerp import models, fields, api, _
# from openerp.osv import osv, fields
from openerp import SUPERUSER_ID
from openerp.models import NewId
# from openerp.tools.translate import _
import re

from openerp.addons.website.models.website import slug


class event(models.Model):
    """Website extension of event.event: publication flag, SEO metadata,
    website URL computation and an optional dedicated website menu.
    """
    _name = 'event.event'
    _inherit = ['event.event', 'website.seo.metadata']
    # Post chatter messages when the event is (un)published on the website.
    _track = {
        'website_published': {
            'website_event.mt_event_published': lambda self, cr, uid, obj, ctx=None: obj.website_published,
            'website_event.mt_event_unpublished': lambda self, cr, uid, obj, ctx=None: not obj.website_published
        },
    }

    twitter_hashtag = fields.Char('Twitter Hashtag', default=lambda self: self._default_hashtag())
    website_published = fields.Boolean('Visible in Website', copy=False)
    # TDE TODO FIXME: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
    website_message_ids = fields.One2many(
        'mail.message', 'res_id',
        domain=lambda self: [
            '&', ('model', '=', self._name), ('type', '=', 'comment')
        ],
        string='Website Messages',
        help="Website communication history",
    )
    website_url = fields.Char('Website url', compute='_website_url')

    @api.one
    @api.depends('name')
    def _website_url(self):
        # A NewId means the record is not yet saved, so no slug/URL exists.
        if isinstance(self.id, NewId):
            self.website_url = ''
        else:
            self.website_url = "/event/" + slug(self)

    def _default_hashtag(self):
        # Derive a default hashtag from the company name, stripping
        # separators/punctuation and lowercasing.
        return re.sub("[- \\.\\(\\)\\@\\#\\&]+", "", self.env.user.company_id.name).lower()

    show_menu = fields.Boolean('Has Dedicated Menu', compute='_get_show_menu', inverse='_set_show_menu')
    menu_id = fields.Many2one('website.menu', 'Event Menu')

    @api.one
    def _get_new_menu_pages(self):
        """Create the website pages for a new event menu.

        Returns a list of (label, url) tuples: one page per template in
        ``todo`` plus a final entry pointing at the registration route.
        """
        todo = [
            (_('Introduction'), 'website_event.template_intro'),
            (_('Location'), 'website_event.template_location')
        ]
        result = []
        for name, path in todo:
            complete_name = name + ' ' + self.name
            newpath = self.env['website'].new_page(complete_name, path, ispage=False)
            url = "/event/" + slug(self) + "/page/" + newpath
            result.append((name, url))
        result.append((_('Register'), '/event/%s/register' % slug(self)))
        return result

    @api.one
    def _set_show_menu(self):
        """Inverse of show_menu: create or delete the dedicated menu tree."""
        if self.menu_id and not self.show_menu:
            self.menu_id.unlink()
        elif self.show_menu and not self.menu_id:
            root_menu = self.env['website.menu'].create({'name': self.name})
            # @api.one wraps the return value in a list; take the first item.
            to_create_menus = self._get_new_menu_pages()[0]  # TDE CHECK api.one -> returns a list with one item ?
            seq = 0
            for name, url in to_create_menus:
                self.env['website.menu'].create({
                    'name': name,
                    'url': url,
                    'parent_id': root_menu.id,
                    'sequence': seq,
                })
                seq += 1
            self.menu_id = root_menu

    @api.one
    def _get_show_menu(self):
        self.show_menu = bool(self.menu_id)

    # Old-API (cr/uid) helpers; re-browse as SUPERUSER_ID so portal/public
    # users can still render the address map widgets.
    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        # NOTE(review): zoom/width/height are accepted but not forwarded to
        # address_id.google_map_img() — confirm whether that is intended.
        event = self.browse(cr, uid, ids[0], context=context)
        if event.address_id:
            return self.browse(cr, SUPERUSER_ID, ids[0], context=context).address_id.google_map_img()
        return None

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        event = self.browse(cr, uid, ids[0], context=context)
        if event.address_id:
            return self.browse(cr, SUPERUSER_ID, ids[0], context=context).address_id.google_map_link()
        return None
agpl-3.0
tboyce021/home-assistant
homeassistant/components/apple_tv/__init__.py
2
12223
"""The Apple TV integration.""" import asyncio import logging from random import randrange from pyatv import connect, exceptions, scan from pyatv.const import Protocol from homeassistant.components.media_player import DOMAIN as MP_DOMAIN from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN from homeassistant.const import ( CONF_ADDRESS, CONF_NAME, CONF_PROTOCOL, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import callback from homeassistant.helpers import device_registry as dr from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from .const import CONF_CREDENTIALS, CONF_IDENTIFIER, CONF_START_OFF, DOMAIN _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Apple TV" BACKOFF_TIME_UPPER_LIMIT = 300 # Five minutes NOTIFICATION_TITLE = "Apple TV Notification" NOTIFICATION_ID = "apple_tv_notification" SOURCE_REAUTH = "reauth" SIGNAL_CONNECTED = "apple_tv_connected" SIGNAL_DISCONNECTED = "apple_tv_disconnected" PLATFORMS = [MP_DOMAIN, REMOTE_DOMAIN] async def async_setup(hass, config): """Set up the Apple TV integration.""" return True async def async_setup_entry(hass, entry): """Set up a config entry for Apple TV.""" manager = AppleTVManager(hass, entry) hass.data.setdefault(DOMAIN, {})[entry.unique_id] = manager async def on_hass_stop(event): """Stop push updates when hass stops.""" await manager.disconnect() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop) async def setup_platforms(): """Set up platforms and initiate connection.""" await asyncio.gather( *[ hass.config_entries.async_forward_entry_setup(entry, component) for component in PLATFORMS ] ) await manager.init() hass.async_create_task(setup_platforms()) return True async def async_unload_entry(hass, entry): """Unload an Apple TV config entry.""" unload_ok = all( await asyncio.gather( *[ 
hass.config_entries.async_forward_entry_unload(entry, platform) for platform in PLATFORMS ] ) ) if unload_ok: manager = hass.data[DOMAIN].pop(entry.unique_id) await manager.disconnect() return unload_ok class AppleTVEntity(Entity): """Device that sends commands to an Apple TV.""" def __init__(self, name, identifier, manager): """Initialize device.""" self.atv = None self.manager = manager self._name = name self._identifier = identifier async def async_added_to_hass(self): """Handle when an entity is about to be added to Home Assistant.""" @callback def _async_connected(atv): """Handle that a connection was made to a device.""" self.atv = atv self.async_device_connected(atv) self.async_write_ha_state() @callback def _async_disconnected(): """Handle that a connection to a device was lost.""" self.async_device_disconnected() self.atv = None self.async_write_ha_state() self.async_on_remove( async_dispatcher_connect( self.hass, f"{SIGNAL_CONNECTED}_{self._identifier}", _async_connected ) ) self.async_on_remove( async_dispatcher_connect( self.hass, f"{SIGNAL_DISCONNECTED}_{self._identifier}", _async_disconnected, ) ) def async_device_connected(self, atv): """Handle when connection is made to device.""" def async_device_disconnected(self): """Handle when connection was lost to device.""" @property def name(self): """Return the name of the device.""" return self._name @property def unique_id(self): """Return a unique ID.""" return self._identifier @property def should_poll(self): """No polling needed for Apple TV.""" return False class AppleTVManager: """Connection and power manager for an Apple TV. An instance is used per device to share the same power state between several platforms. It also manages scanning and connection establishment in case of problems. 
""" def __init__(self, hass, config_entry): """Initialize power manager.""" self.config_entry = config_entry self.hass = hass self.atv = None self._is_on = not config_entry.options.get(CONF_START_OFF, False) self._connection_attempts = 0 self._connection_was_lost = False self._task = None async def init(self): """Initialize power management.""" if self._is_on: await self.connect() def connection_lost(self, _): """Device was unexpectedly disconnected. This is a callback function from pyatv.interface.DeviceListener. """ _LOGGER.warning('Connection lost to Apple TV "%s"', self.atv.name) if self.atv: self.atv.close() self.atv = None self._connection_was_lost = True self._dispatch_send(SIGNAL_DISCONNECTED) self._start_connect_loop() def connection_closed(self): """Device connection was (intentionally) closed. This is a callback function from pyatv.interface.DeviceListener. """ if self.atv: self.atv.close() self.atv = None self._dispatch_send(SIGNAL_DISCONNECTED) self._start_connect_loop() async def connect(self): """Connect to device.""" self._is_on = True self._start_connect_loop() async def disconnect(self): """Disconnect from device.""" _LOGGER.debug("Disconnecting from device") self._is_on = False try: if self.atv: self.atv.push_updater.listener = None self.atv.push_updater.stop() self.atv.close() self.atv = None if self._task: self._task.cancel() self._task = None except Exception: # pylint: disable=broad-except _LOGGER.exception("An error occurred while disconnecting") def _start_connect_loop(self): """Start background connect loop to device.""" if not self._task and self.atv is None and self._is_on: self._task = asyncio.create_task(self._connect_loop()) else: _LOGGER.debug( "Not starting connect loop (%s, %s)", self.atv is None, self._is_on ) async def _connect_loop(self): """Connect loop background task function.""" _LOGGER.debug("Starting connect loop") # Try to find device and connect as long as the user has said that # we are allowed to connect and we are not 
already connected. while self._is_on and self.atv is None: try: conf = await self._scan() if conf: await self._connect(conf) except exceptions.AuthenticationError: self._auth_problem() break except asyncio.CancelledError: pass except Exception: # pylint: disable=broad-except _LOGGER.exception("Failed to connect") self.atv = None if self.atv is None: self._connection_attempts += 1 backoff = min( randrange(2 ** self._connection_attempts), BACKOFF_TIME_UPPER_LIMIT ) _LOGGER.debug("Reconnecting in %d seconds", backoff) await asyncio.sleep(backoff) _LOGGER.debug("Connect loop ended") self._task = None def _auth_problem(self): """Problem to authenticate occurred that needs intervention.""" _LOGGER.debug("Authentication error, reconfigure integration") name = self.config_entry.data.get(CONF_NAME) identifier = self.config_entry.unique_id self.hass.components.persistent_notification.create( "An irrecoverable connection problem occurred when connecting to " f"`f{name}`. Please go to the Integrations page and reconfigure it", title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID, ) # Add to event queue as this function is called from a task being # cancelled from disconnect asyncio.create_task(self.disconnect()) self.hass.async_create_task( self.hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_REAUTH}, data={CONF_NAME: name, CONF_IDENTIFIER: identifier}, ) ) async def _scan(self): """Try to find device by scanning for it.""" identifier = self.config_entry.unique_id address = self.config_entry.data[CONF_ADDRESS] protocol = Protocol(self.config_entry.data[CONF_PROTOCOL]) _LOGGER.debug("Discovering device %s", identifier) atvs = await scan( self.hass.loop, identifier=identifier, protocol=protocol, hosts=[address] ) if atvs: return atvs[0] _LOGGER.debug( "Failed to find device %s with address %s, trying to scan", identifier, address, ) atvs = await scan(self.hass.loop, identifier=identifier, protocol=protocol) if atvs: return atvs[0] 
_LOGGER.debug("Failed to find device %s, trying later", identifier) return None async def _connect(self, conf): """Connect to device.""" credentials = self.config_entry.data[CONF_CREDENTIALS] session = async_get_clientsession(self.hass) for protocol, creds in credentials.items(): conf.set_credentials(Protocol(int(protocol)), creds) _LOGGER.debug("Connecting to device %s", self.config_entry.data[CONF_NAME]) self.atv = await connect(conf, self.hass.loop, session=session) self.atv.listener = self self._dispatch_send(SIGNAL_CONNECTED, self.atv) self._address_updated(str(conf.address)) await self._async_setup_device_registry() self._connection_attempts = 0 if self._connection_was_lost: _LOGGER.info( 'Connection was re-established to Apple TV "%s"', self.atv.service.name ) self._connection_was_lost = False async def _async_setup_device_registry(self): attrs = { "identifiers": {(DOMAIN, self.config_entry.unique_id)}, "manufacturer": "Apple", "name": self.config_entry.data[CONF_NAME], } if self.atv: dev_info = self.atv.device_info attrs["model"] = "Apple TV " + dev_info.model.name.replace("Gen", "") attrs["sw_version"] = dev_info.version if dev_info.mac: attrs["connections"] = {(dr.CONNECTION_NETWORK_MAC, dev_info.mac)} device_registry = await dr.async_get_registry(self.hass) device_registry.async_get_or_create( config_entry_id=self.config_entry.entry_id, **attrs ) @property def is_connecting(self): """Return true if connection is in progress.""" return self._task is not None def _address_updated(self, address): """Update cached address in config entry.""" _LOGGER.debug("Changing address to %s", address) self.hass.config_entries.async_update_entry( self.config_entry, data={**self.config_entry.data, CONF_ADDRESS: address} ) def _dispatch_send(self, signal, *args): """Dispatch a signal to all entities managed by this manager.""" async_dispatcher_send( self.hass, f"{signal}_{self.config_entry.unique_id}", *args )
apache-2.0
kleins11/intdatasci-byte2
lib/jinja2/bccache.py
346
12793
# -*- coding: utf-8 -*-
"""
    jinja2.bccache
    ~~~~~~~~~~~~~~

    This module implements the bytecode cache system Jinja is optionally
    using.  This is useful if you have very complex template situations and
    the compiliation of all those templates slow down your application too
    much.

    Situations where this is useful are often forking web applications that
    are initialized on the first request.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD.
"""
from os import path, listdir
import os
import sys
import stat
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type


# marshal works better on 3.x, one hack less required
if not PY2:
    marshal_dump = marshal.dump
    marshal_load = marshal.load
else:

    def marshal_dump(code, f):
        # Real file objects can be passed straight to marshal; file-likes
        # only get the serialized bytes written to them.
        if isinstance(f, file):
            marshal.dump(code, f)
        else:
            f.write(marshal.dumps(code))

    def marshal_load(f):
        if isinstance(f, file):
            return marshal.load(f)
        return marshal.loads(f.read())


bc_version = 2

# magic version used to only change with new jinja versions.  With 2.6
# we change this to also take Python version changes into account.  The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
    pickle.dumps(bc_version, 2) + \
    pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])


class Bucket(object):
    """Buckets are used to store the bytecode for one template.  It's created
    and initialized by the bytecode cache and passed to the loading functions.

    The buckets get an internal checksum from the cache assigned and use this
    to automatically reject outdated cache material.  Individual bytecode
    cache subclasses don't have to care about cache invalidation.
    """

    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self):
        """Resets the bucket (unloads the bytecode)."""
        self.code = None

    def load_bytecode(self, f):
        """Loads bytecode from a file or file like object."""
        # make sure the magic header is correct
        magic = f.read(len(bc_magic))
        if magic != bc_magic:
            self.reset()
            return
        # the source code of the file changed, we need to reload
        checksum = pickle.load(f)
        if self.checksum != checksum:
            self.reset()
            return
        # if marshal_load fails then we need to reload
        try:
            self.code = marshal_load(f)
        except (EOFError, ValueError, TypeError):
            self.reset()
            return

    def write_bytecode(self, f):
        """Dump the bytecode into the file or file like object passed."""
        if self.code is None:
            raise TypeError('can\'t write empty bucket')
        # File layout: magic header, pickled checksum, marshalled code.
        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)

    def bytecode_from_string(self, string):
        """Load bytecode from a string."""
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self):
        """Return the bytecode as string."""
        out = BytesIO()
        self.write_bytecode(out)
        return out.getvalue()


class BytecodeCache(object):
    """To implement your own bytecode cache you have to subclass this class
    and override :meth:`load_bytecode` and :meth:`dump_bytecode`.  Both of
    these methods are passed a :class:`~jinja2.bccache.Bucket`.

    A very basic bytecode cache that saves the bytecode on the file system::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced version of a filesystem based bytecode cache is part of
    Jinja2.
    """

    def load_bytecode(self, bucket):
        """Subclasses have to override this method to load bytecode into a
        bucket.  If they are not able to find code in the cache for the
        bucket, it must not do anything.
        """
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Subclasses have to override this method to write the bytecode
        from a bucket back to the cache.  If it unable to do so it must not
        fail silently but raise an exception.
        """
        raise NotImplementedError()

    def clear(self):
        """Clears the cache.  This method is not used by Jinja2 but should be
        implemented to allow applications to clear the bytecode cache used
        by a particular environment.
        """

    def get_cache_key(self, name, filename=None):
        """Returns the unique hash key for this template name."""
        hash = sha1(name.encode('utf-8'))
        if filename is not None:
            filename = '|' + filename
            if isinstance(filename, text_type):
                filename = filename.encode('utf-8')
            hash.update(filename)
        return hash.hexdigest()

    def get_source_checksum(self, source):
        """Returns a checksum for the source."""
        return sha1(source.encode('utf-8')).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template.  All arguments are
        mandatory but filename may be `None`.
        """
        key = self.get_cache_key(name, filename)
        checksum = self.get_source_checksum(source)
        bucket = Bucket(environment, key, checksum)
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)


class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem.  It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified a default cache directory is selected.  On
    Windows the user's temp directory is used, on UNIX systems a directory
    is created for the user in the system temp directory.

    The pattern can be used to have multiple separate caches operate on the
    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear method.
    """

    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
        if directory is None:
            directory = self._get_default_cache_dir()
        self.directory = directory
        self.pattern = pattern

    def _get_default_cache_dir(self):
        # Pick a per-user cache directory, verifying ownership and
        # permissions to avoid sharing bytecode with other users.
        def _unsafe_dir():
            raise RuntimeError('Cannot determine safe temp directory.  You '
                               'need to explicitly provide one.')

        tmpdir = tempfile.gettempdir()

        # On windows the temporary directory is used specific unless
        # explicitly forced otherwise.  We can just use that.
        if os.name == 'nt':
            return tmpdir
        if not hasattr(os, 'getuid'):
            _unsafe_dir()

        dirname = '_jinja2-cache-%d' % os.getuid()
        actual_dir = os.path.join(tmpdir, dirname)

        try:
            # User-only permissions; EEXIST just means another process
            # (or an earlier run) created it first.
            os.mkdir(actual_dir, stat.S_IRWXU)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        try:
            os.chmod(actual_dir, stat.S_IRWXU)
            actual_dir_stat = os.lstat(actual_dir)
            # Reject the directory unless we own it, it really is a
            # directory, and only we can access it.
            if actual_dir_stat.st_uid != os.getuid() \
               or not stat.S_ISDIR(actual_dir_stat.st_mode) \
               or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
                _unsafe_dir()
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        actual_dir_stat = os.lstat(actual_dir)
        if actual_dir_stat.st_uid != os.getuid() \
           or not stat.S_ISDIR(actual_dir_stat.st_mode) \
           or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
            _unsafe_dir()

        return actual_dir

    def _get_cache_filename(self, bucket):
        return path.join(self.directory, self.pattern % bucket.key)

    def load_bytecode(self, bucket):
        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
        if f is not None:
            try:
                bucket.load_bytecode(f)
            finally:
                f.close()

    def dump_bytecode(self, bucket):
        f = open(self._get_cache_filename(bucket), 'wb')
        try:
            bucket.write_bytecode(f)
        finally:
            f.close()

    def clear(self):
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove
        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
        for filename in files:
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                pass


class MemcachedBytecodeCache(BytecodeCache):
    """This class implements a bytecode cache that uses a memcache cache for
    storing the information.  It does not enforce a specific memcache library
    (tummy's memcache or cmemcache) but will accept any class that provides
    the minimal interface required.

    Libraries compatible with this class:

    -   `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
    -   `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
    -   `cmemcache <http://gijsbert.org/cmemcache/>`_

    (Unfortunately the django cache interface is not compatible because it
    does not support storing binary data, only unicode.  You can however pass
    the underlying cache client to the bytecode cache which is available
    as `django.core.cache.cache._client`.)

    The minimal interface for the client passed to the constructor is this:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache.  `value` is a string and
            `timeout` the timeout of the key.  If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key.  If the item does not
            exist in the cache the return value must be `None`.

    The other arguments to the constructor are the prefix for all keys that
    is added before the actual cache key and the timeout for the bytecode in
    the cache system.  We recommend a high (or no) timeout.

    This bytecode cache does not support clearing of used items in the cache.
    The clear method is a no-operation function.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            # Treat a memcache failure as a cache miss when configured to.
            if not self.ignore_memcache_errors:
                raise
            code = None
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
        if self.timeout is not None:
            args += (self.timeout,)
        try:
            self.client.set(*args)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
apache-2.0
flibbertigibbet/DRIVER
app/data/tasks/export_csv.py
2
17878
"""Celery task that exports DriverRecords, filtered by a cached SQL query,
to per-related-info CSV files bundled into a single zip archive.

NOTE: this module is Python 2 only (``StringIO``, ``unicode``, ``viewitems``,
octal/long literals such as ``0755 << 16L``).
"""
import csv
import os
import zipfile
import tempfile
import time
import StringIO

import pytz

from django.conf import settings
from django.contrib.auth.models import User

from celery import shared_task
from celery.utils.log import get_task_logger
from django_redis import get_redis_connection

from data.models import DriverRecord
from driver_auth.permissions import is_admin_or_writer

logger = get_task_logger(__name__)

# Timezone used to localize record timestamps before rendering them to text.
local_tz = pytz.timezone(settings.TIME_ZONE)


def _utf8(value):
    """ Helper for properly encoding values that may contain unicode characters.
    From https://github.com/azavea/django-queryset-csv/blob/master/djqscsv/djqscsv.py#L174
    :param value: The string to encode
    """
    if isinstance(value, str):
        return value
    elif isinstance(value, unicode):
        return value.encode('utf-8')
    else:
        # Not a string at all: stringify first, then encode.
        return unicode(value).encode('utf-8')


def _sanitize(value):
    """ Helper for sanitizing the record type label to ensure it doesn't contain
    characters that are invalid in filenames such as slashes.
    This keeps spaces, periods, underscores, and all unicode characters.
    :param value: The string to sanitize
    """
    return ''.join(char for char in value
                   if char.isalnum() or char in [' ', '.', '_']).rstrip()


@shared_task(track_started=True)
def export_csv(query_key, user_id):
    """Exports a set of records to a series of CSV files and places them in a
    compressed tarball

    NOTE(review): despite the wording above, the code below actually builds a
    ``.zip`` archive, not a tarball.

    :param query_key: A UUID corresponding to a cached SQL query which will be
                      used to filter which records are returned. This is the
                      same key used to generate filtered Windshaft tiles so
                      that the CSV will correspond to the filters applied in
                      the UI.
    :param user_id: Primary key of the requesting User; determines whether the
                    full or the read-only exporter is used.
    :returns: Base name of the zip file written under
              ``settings.CELERY_EXPORTS_FILE_PATH``.
    """
    # Get Records
    records = get_queryset_by_key(query_key)
    # Get the most recent Schema for the Records' RecordType
    # This assumes that all of the Records have the same RecordType.
    try:
        record_type = records[0].schema.record_type
        schema = record_type.get_current_schema()
    except IndexError:
        # Raw queryset with zero rows raises IndexError on records[0].
        raise Exception('Filter includes no records')

    # Get user
    user = User.objects.get(pk=user_id)

    # Create files and CSV Writers from Schema
    if is_admin_or_writer(user):
        record_writer = DriverRecordExporter(schema)
    else:
        record_writer = ReadOnlyRecordExporter(schema)

    # Write records to files
    for rec in records:
        record_writer.write_record(rec)
    record_writer.finish()

    # Compress files into a single zipfile.
    # TODO: Figure out how to transfer files to web users from celery workers
    # external_attr is 4 bytes in size. The high order two bytes represent UNIX
    # permission and file type bits, while the low order two contain MS-DOS FAT
    # file attributes, most notably bit 4 marking directories
    # For information on setting file permissions in zipfile, see
    # http://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip
    filename = "{}-{}.zip".format(_utf8(_sanitize(record_type.plural_label)), query_key[:8])
    path = os.path.join(settings.CELERY_EXPORTS_FILE_PATH, filename)
    archive = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
    # Add a directory for the schema we're outputting
    dirname = 'schema-' + str(schema.pk) + '/'
    for f, name in record_writer.get_files_and_names():
        t = time.struct_time(time.localtime(time.time()))
        info = zipfile.ZipInfo(filename=dirname + name, date_time=(
            t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec
        ))
        # rwxr-xr-x in the high 16 bits (Python 2 octal + long literal).
        info.external_attr = 0755 << 16L
        with open(f.name, 'r') as z:
            archive.writestr(info, z.read())
    archive.close()

    # Cleanup
    record_writer.cleanup()
    return os.path.basename(archive.filename)


def get_sql_string_by_key(key):
    """Returns a SQL string from Redis using key
    :param key: A UUID pointing to the SQL string
    """
    # Since the records list endpoint bypasses the Django caching framework,
    # do that here too
    redis_conn = get_redis_connection('default')
    return redis_conn.get(key)


def get_queryset_by_key(key):
    """Returns a queryset by filtering Records using the SQL stored in Redis at key
    :param key: A UUID specifying the SQL string to use
    """
    sql_str = get_sql_string_by_key(key)
    return DriverRecord.objects.raw(sql_str)


class DriverRecordExporter(object):
    """Exports Records matching a schema to CSVs"""

    def __init__(self, schema_obj):
        # Detect related info types and set up CSV Writers as necessary
        self.schema = schema_obj.schema
        # Make output writers and output files
        self.rec_writer = self.make_record_and_details_writer()
        # All non-details related info types
        self.writers = {related: self.make_related_info_writer(related, subschema)
                        for related, subschema in self.schema['definitions'].viewitems()
                        if not subschema.get('details')}
        self.rec_outfile, self.outfiles = self.setup_output_files()
        self.write_headers()

    def setup_output_files(self):
        """Create the output files necessary for writing CSVs"""
        # Using NamedTemporaryFiles is necessary for creating tarballs containing temp files
        # https://bugs.python.org/issue21044
        rec_outfile = tempfile.NamedTemporaryFile(delete=False)
        outfiles = {related: tempfile.NamedTemporaryFile(delete=False)
                    for related, subschema in self.schema['definitions'].iteritems()
                    if not subschema.get('details')}
        return (rec_outfile, outfiles)

    def write_headers(self):
        """Write CSV headers to output files"""
        # Write CSV header to all files
        self.rec_writer.write_header(self.rec_outfile)
        for related_name, writer in self.writers.viewitems():
            writer.write_header(self.outfiles[related_name])

    def finish(self):
        """Close all open file handles"""
        self.rec_outfile.close()
        for f in self.outfiles.values():
            f.close()

    def cleanup(self):
        """Deletes all temporary files"""
        os.remove(self.rec_outfile.name)
        for f in self.outfiles.values():
            os.remove(f.name)

    def get_files_and_names(self):
        """Return all file objects maintained by this exporter along with suggested names"""
        yield (self.rec_outfile, 'records.csv')
        for related_name, out_file in self.outfiles.viewitems():
            yield (out_file, related_name + '.csv')

    def write_record(self, rec):
        """Pass rec's fields through all writers to output all info as CSVs"""
        # First the constants writer
        self.rec_writer.write_record(rec, self.rec_outfile)
        # Next, use the related info writers to output to the appropriate files
        for related_name, writer in self.writers.viewitems():
            if related_name in rec.data:
                if writer.is_multiple:
                    # 'multiple' related info is a list: one CSV row per item.
                    for item in rec.data[related_name]:
                        writer.write_related(rec.pk, item, self.outfiles[related_name])
                else:
                    writer.write_related(rec.pk, rec.data[related_name],
                                         self.outfiles[related_name])

    def make_constants_csv_writer(self):
        """Generate a Record Writer capable of writing out the non-json fields of a Record"""
        def render_date(d):
            return d.astimezone(local_tz).strftime('%Y-%m-%d %H:%M:%S')
        # TODO: Currently this is hard-coded; it may be worthwhile to make this introspect Record
        # to figure out which fields to use, but that will be somewhat involved.
        csv_columns = ['record_id', 'timezone', 'created', 'modified', 'occurred_from',
                       'occurred_to', 'lat', 'lon', 'location_text', 'city', 'city_district',
                       'county', 'neighborhood', 'road', 'state', 'weather', 'light']
        # Model field from which to get data for each csv column
        source_fields = {
            'record_id': 'uuid',
            'timezone': None,
            'lat': 'geom',
            'lon': 'geom'
        }
        # Some model fields need to be transformed before they can go into a CSV
        value_transforms = {
            'record_id': lambda uuid: str(uuid),
            'timezone': lambda _: settings.TIME_ZONE,
            'created': render_date,
            'modified': render_date,
            'occurred_from': render_date,
            'occurred_to': render_date,
            'lat': lambda geom: geom.y,
            'lon': lambda geom: geom.x,
        }
        return RecordModelWriter(csv_columns, source_fields, value_transforms)

    def make_related_info_writer(self, info_name, info_definition, include_record_id=True):
        """Generate a RelatedInfoExporter capable of writing out a particular related info field
        :param info_name: Name of the related info definition (becomes the CSV name).
        :param info_definition: The definitions entry providing the sub-schema to write out.
        :param include_record_id: Whether each row should carry the owning record's id.
        """
        # Need to drop Media fields; we can't export them to CSV usefully.
        drop_keys = dict()
        for prop, attributes in info_definition['properties'].iteritems():
            if 'media' in attributes:
                drop_keys[prop] = None
        return RelatedInfoWriter(info_name, info_definition, field_transform=drop_keys,
                                 include_record_id=include_record_id)

    def make_record_and_details_writer(self):
        """Generate a writer to put record fields and details in one CSV"""
        model_writer = self.make_constants_csv_writer()
        # Exactly one definition is expected to be flagged 'details': True.
        # TODO confirm that assumption holds for every schema version.
        details = {key: subschema for key, subschema
                   in self.schema['definitions'].viewitems()
                   if subschema.get('details') is True}
        details_key = details.keys()[0]
        details_writer = self.make_related_info_writer(details_key, details[details_key],
                                                       include_record_id=False)
        return ModelAndDetailsWriter(model_writer, details_writer, details_key)


class ReadOnlyRecordExporter(DriverRecordExporter):
    """Export only fields which read-only users are allow to access"""

    def __init__(self, schema_obj):
        # Don't write any related info fields, just details only.
        self.schema = schema_obj.schema
        # Make output writers and output files
        self.rec_writer = self.make_record_and_details_writer()
        self.writers = dict()
        self.rec_outfile, self.outfiles = self.setup_output_files()
        self.write_headers()

    def setup_output_files(self):
        """Create the output files necessary for writing CSVs"""
        # Using NamedTemporaryFiles is necessary for creating tarballs containing temp files
        # https://bugs.python.org/issue21044
        rec_outfile = tempfile.NamedTemporaryFile(delete=False)
        outfiles = dict()
        return (rec_outfile, outfiles)


class BaseRecordWriter(object):
    """Base class for some common functions that exporters need"""

    def write_header(self, csv_file):
        """Write the CSV header to csv_file"""
        # Need to sanitize CSV columns to utf-8 before writing
        header_columns = [_utf8(col) for col in self.csv_columns]
        writer = csv.DictWriter(csv_file, fieldnames=header_columns)
        writer.writeheader()


class ModelAndDetailsWriter(BaseRecordWriter):
    """Exports records' model fields, and the *Details field, to a single CSV"""

    def __init__(self, model_writer, details_writer, details_key):
        """Creates a combined writer
        :param model_writer: A RecordModelWriter instance that will be used to write model fields
        :param details_writer: A RelatedInfoWriter instance that will be used to write Details
        :param details_key: Key under which the Details dict lives in record.data
        """
        self.model_writer = model_writer
        self.details_writer = details_writer
        self.details_key = details_key

    def merge_lines(self, lines_str):
        """Merge lines written by separate CSV writers to a single line by
        replacing '\r\n' with ','
        """
        return lines_str.replace('\r\n', ',').rstrip(',') + '\r\n'

    def write_header(self, csv_file):
        """Write writer headers to a CSV file"""
        output = StringIO.StringIO()
        self.model_writer.write_header(output)
        self.details_writer.write_header(output)
        csv_file.write(self.merge_lines(output.getvalue()))

    def write_record(self, record, csv_file):
        """Pull data from a record, send to appropriate writers, and then combine output"""
        output = StringIO.StringIO()
        self.model_writer.write_record(record, output)
        self.details_writer.write_related(record.pk, record.data[self.details_key], output)
        csv_file.write(self.merge_lines(output.getvalue()))


class RecordModelWriter(BaseRecordWriter):
    """Exports records' model fields to CSV"""

    def __init__(self, csv_columns, source_fields=dict(), value_transforms=dict()):
        """Creates a record exporter
        :param csv_columns: List of columns names to write out to the CSV.
            E.g. ['latitude', 'longitude']
        :param source_fields: Dictionary mapping column names to the name of the model field
            where the appropriate value can be found.
            E.g. {'latitude': 'geom', 'longitude': 'geom'}
            Pulls from attributes with the same name as the column name by default
        :param value_transforms: Dictionary mapping column names to functions by which to
            transform model field values before writing to the CSV.
            E.g. {'latitude': lambda geom: geom.y}
            If a field is not included here, it will be used directly
        """
        self.csv_columns = csv_columns
        self.source_fields = source_fields
        self.value_transforms = value_transforms

    def write_record(self, record, csv_file):
        """Pull field data from record object, transform, write to csv_file"""
        output_data = dict()
        for column in self.csv_columns:
            model_value = self.get_model_value_for_column(record, column)
            csv_val = self.transform_model_value(model_value, column)
            output_data[column] = _utf8(csv_val)
        writer = csv.DictWriter(csv_file, fieldnames=self.csv_columns)
        writer.writerow(output_data)

    def get_model_value_for_column(self, record, column):
        """Gets the value from the appropriate model field to populate column"""
        # Get the value from record.<source_field> if a <source_field> is defined for <column>,
        # otherwise get it from record.<column>
        model_field = self.source_fields.get(column, column)
        if model_field is None:
            return None
        return getattr(record, model_field)

    def transform_model_value(self, value, column):
        """Transforms value into an appropriate value for column"""
        # Pass the value through any necessary transformation before output.
        val_transform = self.value_transforms.get(column, lambda v: v)
        return val_transform(value)


class RelatedInfoWriter(BaseRecordWriter):
    """Exports related info properties to CSV"""

    def __init__(self, info_name, info_definition, field_transform=dict(),
                 include_record_id=True):
        # Construct a field name mapping; this allows dropping Media fields from CSVs and
        # allows renaming _localid to something more useful. The final output will be a mapping
        # of all fields in the related info definition to the corresponding field that should
        # be output in the CSV. If a field name is mapped to None then it is dropped.
        self.property_transform = field_transform
        try:
            for prop in info_definition['properties']:
                if prop not in self.property_transform:
                    self.property_transform[prop] = prop
        except KeyError:
            raise ValueError("Related info definition has no 'properties'; can't detect fields")
        self.property_transform['_localId'] = info_name + '_id'
        info_columns = [col for col in self.property_transform.values() if col is not None]
        self.output_record_id = include_record_id
        if self.output_record_id:
            # Need to label every row with the id of the record it relates to
            self.csv_columns = ['record_id'] + info_columns
        else:
            self.csv_columns = info_columns
        self.is_multiple = info_definition.get('multiple', False)

    def write_related(self, record_id, related_info, csv_file):
        """Transform related_info and write to csv_file"""
        # Transform
        output_data = self.transform_value_keys(related_info)
        # Append record_id
        if self.output_record_id:
            output_data['record_id'] = record_id
        # Write
        writer = csv.DictWriter(csv_file, fieldnames=self.csv_columns)
        writer.writerow(output_data)

    def transform_value_keys(self, related_info):
        """Set incoming values to new keys in output_data based on self.property_transform"""
        output_data = dict()
        for in_key, out_key in self.property_transform.viewitems():
            if out_key is not None:
                try:
                    # Assign the value of the input data to the renamed key in the output data
                    output_data[out_key] = _utf8(related_info.pop(in_key))
                except KeyError:
                    # in_key doesn't exist in input; this is fine, the CSV writer will handle it
                    pass
        return output_data
gpl-3.0
marshall007/rethinkdb
packaging/osx/create_dmg.py
8
7673
#!/usr/bin/python
# -*- coding: utf-8 -*-

'''Create the dmg for MacOS deployment'''

import atexit, copy, os, re, shutil, subprocess, sys, tempfile

thisFolder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(thisFolder)
import dmgbuild

# == defaults

# Working directory for intermediate build products; removed at exit unless
# overridden with --scratch-folder.
scratchFolder = tempfile.mkdtemp()
# Icon position of the installer .pkg inside the dmg window.
packagePosition = (470, 170)

# Baseline dmgbuild settings; main() deep-copies this and appends the
# dynamically generated files (pkg, uninstaller, release notes).
defaultOptions = {
    'format': 'UDBZ',
    'badge_icon': os.path.join(thisFolder, 'Thinker.icns'),
    'files': [
        os.path.join(thisFolder, os.path.pardir, os.path.pardir, 'COPYRIGHT')
    ],
    'icon_size': 64.0,
    'text_size': 14.0,
    'icon_locations': {
        'Uninstall RethinkDB.app': (630, 170),
        'Release Notes.url': (470, 303),
        'COPYRIGHT': (630, 303)
    },
    'background': os.path.join(thisFolder, 'dmg_background.png'),
    'window_rect': ((200, 200), (739, 420)),
    'default_view': 'icon-view',
    'show_icon_preview': True
}

# ==

def removeAtExit(removePath):
    """Best-effort removal of a file or directory; failures only warn."""
    if os.path.exists(removePath):
        try:
            if os.path.isfile(removePath):
                os.unlink(removePath)
            elif os.path.isdir(removePath):
                shutil.rmtree(removePath, ignore_errors=True)
        except Exception as e:
            sys.stderr.write('Unable to delete item: %s -- %s\n' % (removePath, str(e)))
atexit.register(removeAtExit, scratchFolder)

def compileUninstallApp():
    """Compile uninstall.scpt into 'Uninstall RethinkDB.app' and return its path."""
    outputPath = os.path.join(scratchFolder, 'Uninstall RethinkDB.app')

    # - compile the app
    logFile = open(os.path.join(scratchFolder, 'uninstall-compile.log'), 'w+')
    try:
        subprocess.check_call(['/usr/bin/osacompile', '-o', outputPath, os.path.join(thisFolder, 'uninstall.scpt')], stdout=logFile, stderr=logFile)
    except Exception as e:
        logFile.seek(0)
        sys.stderr.write('Failed while compiling %s: %s\n%s' % (os.path.join(thisFolder, 'uninstall.scpt'), str(e), logFile.read()))
        raise

    # - change the icon - ToDo: re-do this once an icon is chosen
    #iconPath = os.path.join(outputPath, 'Contents', 'Resources', 'applet.icns')
    #os.unlink(iconPath)
    #os.symlink('/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources/Unsupported.icns', iconPath)

    # -
    return outputPath

def makeReleaseNotesLink(version):
    """Write a 'Release Notes.url' internet shortcut pointing at the GitHub release page."""
    notesPath = os.path.join(scratchFolder, 'Release Notes.url')
    with open(notesPath, 'w') as outputFile:
        outputFile.write('[InternetShortcut]\nURL=https://github.com/rethinkdb/rethinkdb/releases/tag/v%s\n' % version)
    return notesPath

def buildPackage(versionString, serverRootPath, signingName=None):
    '''Generate a .pkg with all of our customizations

    :param versionString: full version reported by the rethinkdb binary
    :param serverRootPath: filesystem root to package (pkgbuild --root)
    :param signingName: optional macappstore signing identity; validated
        against `security find-identity` before use
    '''

    # == check for the identity
    if signingName is not None:
        signingName = str(signingName)
        foundSigningIdentity = False
        for line in subprocess.check_output(['/usr/bin/security', 'find-identity', '-p', 'macappstore', '-v']).splitlines():
            if signingName in line:
                foundSigningIdentity = True
                break
        if foundSigningIdentity is False:
            raise ValueError('Could not find the requested signingName: %s' % signingName)

    # == build the component packages

    # - make the server package
    packageFolder = os.path.join(scratchFolder, 'packages')
    if not os.path.isdir(packageFolder):
        os.mkdir(packageFolder)
    serverPackagePath = os.path.join(packageFolder, 'rethinkdb_server.pkg')
    logFile = open(os.path.join(scratchFolder, 'rethinkdb_server_pkg.log'), 'w+')
    try:
        subprocess.check_call(['/usr/bin/pkgbuild', '--root', serverRootPath, '--identifier', 'com.rethinkdb.server', '--version', versionString, serverPackagePath], stdout=logFile, stderr=logFile)
    except Exception as e:
        logFile.seek(0)
        sys.stderr.write('Failed while building server package: %s\n%s' % (str(e), logFile.read()))
        raise

    # == assemble the archive
    distributionPath = os.path.join(scratchFolder, 'rethinkdb-%s.pkg' % versionString)
    productBuildCommand = ['/usr/bin/productbuild', '--distribution', os.path.join(thisFolder, 'Distribution.xml'), '--package-path', packageFolder, '--resources', os.path.join(thisFolder, 'installer_resources'), distributionPath]
    if signingName is not None:
        productBuildCommand += ['--sign', signingName]
    logFile = open(os.path.join(scratchFolder, 'rethinkdb_pkg.log'), 'w+')
    try:
        subprocess.check_call(productBuildCommand, stdout=logFile, stderr=logFile)
    except Exception as e:
        logFile.seek(0)
        # NOTE(review): this message mentions uninstall.scpt but we are running
        # productbuild here -- looks like a copy-paste from compileUninstallApp;
        # left unchanged because error text is runtime behavior.
        sys.stderr.write('Failed while compiling %s: %s\n%s' % (os.path.join(thisFolder, 'uninstall.scpt'), str(e), logFile.read()))
        raise

    return distributionPath

def main():
    '''Parse command line input and run'''
    global scratchFolder

    # == process input

    import optparse
    parser = optparse.OptionParser()
    # NOTE(review): '--ouptut-location' is a typo for '--output-location', but
    # it is the published flag name -- renaming it would break existing scripts.
    parser.add_option('-s', '--server-root', dest='serverRoot', default=None, help='path to root of the server component')
    parser.add_option('-o', '--ouptut-location', dest='outputPath', help='location for the output file')
    parser.add_option(      '--rethinkdb-name', dest='binaryName', default='rethinkdb', help='name of the rethinkdb server binary')
    parser.add_option(      '--signing-name', dest='signingName', default=None, help='signing identifier')
    parser.add_option(      '--scratch-folder', dest='scratchFolder', default=None, help='folder for intermediate products')
    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error('no non-keyword options are allowed')

    # = -s/--server-root
    if options.serverRoot is None:
        parser.error('-s/--server-root is required')
    if not os.path.isdir(options.serverRoot):
        parser.error('-s/--server-root must be a folder: %s' % options.serverRoot)
    options.serverRoot = os.path.realpath(options.serverRoot)

    # = get the version of rethinkdb

    # find the binary
    rethinkdbPath = os.path.join(options.serverRoot, 'usr', 'local', 'bin', 'rethinkdb')
    if not os.access(rethinkdbPath, os.X_OK):
        parser.error('No runnable RethinkDB executable at: %s' % rethinkdbPath)

    # get the version string (second whitespace-separated token of `rethinkdb --version`)
    versionString = ''
    try:
        versionString = subprocess.check_output([rethinkdbPath, '--version']).strip().split()[1].decode('utf-8')
    except Exception as e:
        print(e)
        parser.error('the executable given is not a valid RethinkDB executable: %s' % rethinkdbPath)
    # strictVersion keeps only the leading dotted-numeric part (e.g. '2.3.6')
    strictVersion = re.match('^(\d+\.?)+', versionString)
    if strictVersion is None:
        parser.error('version string from executable does not have a regular version string: %s' % versionString)
    strictVersion = strictVersion.group()

    # = -o/--ouptut-location
    if options.outputPath is None:
        options.outputPath = os.path.join(thisFolder, 'RethinkDB ' + versionString + '.dmg')
    elif os.path.isdir(options.outputPath):
        options.outputPath = os.path.join(options.outputPath, 'RethinkDB ' + versionString + '.dmg')
    elif not os.path.isdir(os.path.dirname(options.outputPath)):
        parser.error('the output path given is not valid: %s' % options.outputPath)

    # = --scratch-folder
    if options.scratchFolder is not None:
        if not os.path.isdir(options.scratchFolder):
            parser.error('the --scratch-folder given is not an existing folder: %s' % options.scratchFolder)
        scratchFolder = options.scratchFolder

    # == build the pkg
    pkgPath = buildPackage(versionString, options.serverRoot, signingName=options.signingName)

    # == add dynamic content to settings
    dmgOptions = copy.deepcopy(defaultOptions)

    # = package
    dmgOptions['files'].append(pkgPath)
    dmgOptions['icon_locations'][os.path.basename(pkgPath)] = packagePosition

    # = uninstall script
    uninstallAppPath = compileUninstallApp()
    dmgOptions['files'].append(uninstallAppPath)

    # = release notes
    dmgOptions['files'].append(makeReleaseNotesLink(strictVersion))

    # == dmg creation
    dmgbuild.build_dmg(options.outputPath, 'RethinkDB ' + versionString, defines=dmgOptions)

if __name__ == '__main__':
    main()
agpl-3.0
Sjc1000/PyRC
UI/NickName.py
1
1047
#!/usr/bin/env python3
"""NickName UI plugin: displays the user's current nickname per IRC server."""

from gi.repository import Gtk, Pango


class NickName():
    """Label widget on the main grid showing the nickname in use.

    Fix: ``servers``, ``active_server`` and ``active_channel`` used to be
    class-level attributes, so every NickName instance shared one mutable
    dict and one set of "active" fields. They are now per-instance state
    initialised in ``__init__`` (interface is otherwise unchanged).
    """

    def __init__(self, MainWindow):
        self.MainWindow = MainWindow
        # Grid attach geometry: (left, top, width, height).
        self.position = [0, 9, 1, 1]
        # Maps server name -> last known nickname on that server.
        self.servers = {}
        self.active_server = None
        self.active_channel = None

    def prebuild(self):
        """Shrink the ServerList plugin by one grid row to make room for us."""
        self.MainWindow.ui_plugins['ServerList'].position[3] -= 1
        return None

    def build(self):
        """Create the label widget and attach it to the main window grid."""
        self.text = Gtk.Label()
        self.text.set_text('')
        self.MainWindow.grid.attach(self.text, *self.position)
        return None

    def add_server(self, server):
        """Start tracking *server* with an (initially empty) nickname."""
        self.servers[server] = ''
        return None

    def change_nickname(self, server, new_nickname):
        """Record the new nickname for *server* and render it in bold.

        NOTE(review): the nickname is interpolated into Pango markup without
        escaping; a nickname containing '<' or '&' would break set_markup --
        consider GLib.markup_escape_text.
        """
        self.servers[server] = new_nickname
        self.text.set_markup('<span weight="ultrabold">' + new_nickname + '</span>')
        return None

    def activate_path(self, server, channel, clicked=False):
        """Switch the display to the nickname used on *server*."""
        self.active_channel = channel
        self.active_server = server
        self.change_nickname(server, self.servers[server])
        return None
gpl-2.0
igel-kun/pyload
module/lib/thrift/transport/TTransport.py
74
8384
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# NOTE: Python 2 module (cStringIO); part of pyload's bundled Apache Thrift.

from cStringIO import StringIO
from struct import pack, unpack
from thrift.Thrift import TException


class TTransportException(TException):
    """Custom Transport Exception class"""

    # Error type codes carried in self.type
    UNKNOWN = 0
    NOT_OPEN = 1
    ALREADY_OPEN = 2
    TIMED_OUT = 3
    END_OF_FILE = 4

    def __init__(self, type=UNKNOWN, message=None):
        TException.__init__(self, message)
        self.type = type


class TTransportBase:
    """Base class for Thrift transport layer."""

    def isOpen(self):
        pass

    def open(self):
        pass

    def close(self):
        pass

    def read(self, sz):
        pass

    def readAll(self, sz):
        """Read exactly sz bytes by looping over read(); raises EOFError on
        a zero-length read before sz bytes have been gathered."""
        buff = ''
        have = 0
        while (have < sz):
            chunk = self.read(sz - have)
            have += len(chunk)
            buff += chunk

            if len(chunk) == 0:
                raise EOFError()

        return buff

    def write(self, buf):
        pass

    def flush(self):
        pass


# This class should be thought of as an interface.
class CReadableTransport:
    """base class for transports that are readable from C"""

    # TODO(dreiss): Think about changing this interface to allow us to use
    #               a (Python, not c) StringIO instead, because it allows
    #               you to write after reading.

    # NOTE: This is a classic class, so properties will NOT work
    #       correctly for setting.
    @property
    def cstringio_buf(self):
        """A cStringIO buffer that contains the current chunk we are reading."""
        pass

    def cstringio_refill(self, partialread, reqlen):
        """Refills cstringio_buf.

        Returns the currently used buffer (which can but need not be the same as
        the old cstringio_buf). partialread is what the C code has read from the
        buffer, and should be inserted into the buffer before any more reads. The
        return value must be a new, not borrowed reference. Something along the
        lines of self._buf should be fine.

        If reqlen bytes can't be read, throw EOFError.
        """
        pass


class TServerTransportBase:
    """Base class for Thrift server transports."""

    def listen(self):
        pass

    def accept(self):
        pass

    def close(self):
        pass


class TTransportFactoryBase:
    """Base class for a Transport Factory"""

    def getTransport(self, trans):
        return trans


class TBufferedTransportFactory:
    """Factory transport that builds buffered transports"""

    def getTransport(self, trans):
        buffered = TBufferedTransport(trans)
        return buffered


class TBufferedTransport(TTransportBase, CReadableTransport):
    """Class that wraps another transport and buffers its I/O.

    The implementation uses a (configurable) fixed-size read buffer
    but buffers all writes until a flush is performed.
    """

    DEFAULT_BUFFER = 4096

    def __init__(self, trans, rbuf_size=DEFAULT_BUFFER):
        self.__trans = trans
        self.__wbuf = StringIO()
        self.__rbuf = StringIO("")
        self.__rbuf_size = rbuf_size

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        # Serve from the buffer first; only hit the wrapped transport when the
        # buffer is exhausted, reading at least rbuf_size bytes at a time.
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret

        self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
        return self.__rbuf.read(sz)

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        out = self.__wbuf.getvalue()
        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf = StringIO()
        self.__trans.write(out)
        self.__trans.flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, partialread, reqlen):
        retstring = partialread
        if reqlen < self.__rbuf_size:
            # try to make a read of as much as we can.
            retstring += self.__trans.read(self.__rbuf_size)

        # but make sure we do read reqlen bytes.
        if len(retstring) < reqlen:
            retstring += self.__trans.readAll(reqlen - len(retstring))

        self.__rbuf = StringIO(retstring)
        return self.__rbuf


class TMemoryBuffer(TTransportBase, CReadableTransport):
    """Wraps a cStringIO object as a TTransport.

    NOTE: Unlike the C++ version of this class, you cannot write to it
          then immediately read from it.  If you want to read from a
          TMemoryBuffer, you must either pass a string to the constructor.
    TODO(dreiss): Make this work like the C++ version.
    """

    def __init__(self, value=None):
        """value -- a value to read from for stringio

        If value is set, this will be a transport for reading,
        otherwise, it is for writing"""
        if value is not None:
            self._buffer = StringIO(value)
        else:
            self._buffer = StringIO()

    def isOpen(self):
        return not self._buffer.closed

    def open(self):
        pass

    def close(self):
        self._buffer.close()

    def read(self, sz):
        return self._buffer.read(sz)

    def write(self, buf):
        self._buffer.write(buf)

    def flush(self):
        pass

    def getvalue(self):
        return self._buffer.getvalue()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self._buffer

    def cstringio_refill(self, partialread, reqlen):
        # only one shot at reading...
        raise EOFError()


class TFramedTransportFactory:
    """Factory transport that builds framed transports"""

    def getTransport(self, trans):
        framed = TFramedTransport(trans)
        return framed


class TFramedTransport(TTransportBase, CReadableTransport):
    """Class that wraps another transport and frames its I/O when writing."""

    def __init__(self, trans,):
        self.__trans = trans
        self.__rbuf = StringIO()
        self.__wbuf = StringIO()

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret

        self.readFrame()
        return self.__rbuf.read(sz)

    def readFrame(self):
        # A frame is a 4-byte big-endian signed length prefix followed by the
        # payload; buffer the whole payload for subsequent read() calls.
        buff = self.__trans.readAll(4)
        sz, = unpack('!i', buff)
        self.__rbuf = StringIO(self.__trans.readAll(sz))

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        wout = self.__wbuf.getvalue()
        wsz = len(wout)
        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf = StringIO()

        # N.B.: Doing this string concatenation is WAY cheaper than making
        # two separate calls to the underlying socket object. Socket writes in
        # Python turn out to be REALLY expensive, but it seems to do a pretty
        # good job of managing string buffer operations without excessive copies
        buf = pack("!i", wsz) + wout
        self.__trans.write(buf)
        self.__trans.flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # self.__rbuf will already be empty here because fastbinary doesn't
        # ask for a refill until the previous buffer is empty.  Therefore,
        # we can start reading new frames immediately.
        while len(prefix) < reqlen:
            self.readFrame()
            prefix += self.__rbuf.getvalue()
        self.__rbuf = StringIO(prefix)
        return self.__rbuf


class TFileObjectTransport(TTransportBase):
    """Wraps a file-like object to make it work as a Thrift transport."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def isOpen(self):
        return True

    def close(self):
        self.fileobj.close()

    def read(self, sz):
        return self.fileobj.read(sz)

    def write(self, buf):
        self.fileobj.write(buf)

    def flush(self):
        self.fileobj.flush()
gpl-3.0
assad2012/shadowsocks
shadowsocks/daemon.py
694
5602
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Unix daemonization helpers (start/stop/restart via a locked pid file)."""

from __future__ import absolute_import, division, print_function, \
    with_statement

import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell

# this module is ported from ShadowVPN daemon.c


def daemon_exec(config):
    """Dispatch the 'daemon' command from config: start, stop or restart.

    No-op when config has no 'daemon' key. 'stop' always exits the process.
    Raises on non-POSIX platforms or unknown commands.
    """
    if 'daemon' in config:
        if os.name != 'posix':
            raise Exception('daemon mode is only supported on Unix')
        command = config['daemon']
        if not command:
            command = 'start'
        pid_file = config['pid-file']
        log_file = config['log-file']
        if command == 'start':
            daemon_start(pid_file, log_file)
        elif command == 'stop':
            daemon_stop(pid_file)
            # always exit after daemon_stop
            sys.exit(0)
        elif command == 'restart':
            daemon_stop(pid_file)
            daemon_start(pid_file, log_file)
        else:
            raise Exception('unsupported daemon command %s' % command)


def write_pid_file(pid_file, pid):
    """Create/lock the pid file and write our pid into it.

    Returns 0 on success, -1 if the file can't be opened or is already
    locked by a running daemon. The exclusive lock is held for the life
    of the process (fd is deliberately never closed on success).
    """
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        # Lock already held by a live daemon; report its pid if readable.
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0


def freopen(f, mode, stream):
    """Redirect *stream* (e.g. sys.stdout) to file *f*, like C's freopen()."""
    oldf = open(f, mode)
    oldfd = oldf.fileno()
    newfd = stream.fileno()
    os.close(newfd)
    os.dup2(oldfd, newfd)


def daemon_start(pid_file, log_file):
    """Fork into the background, write the pid file, and redirect stdio.

    The parent blocks (up to 5s) until the child signals success (SIGTERM)
    or failure (SIGINT), then exits with the matching status.
    """

    def handle_exit(signum, _):
        if signum == signal.SIGTERM:
            sys.exit(0)
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)

    # fork only once because we are sure parent will exit
    pid = os.fork()
    assert pid != -1

    if pid > 0:
        # parent waits for its child
        time.sleep(5)
        sys.exit(0)

    # child signals its parent to exit
    ppid = os.getppid()
    pid = os.getpid()
    if write_pid_file(pid_file, pid) != 0:
        os.kill(ppid, signal.SIGINT)
        sys.exit(1)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    print('started')
    os.kill(ppid, signal.SIGTERM)

    sys.stdin.close()
    try:
        freopen(log_file, 'a', sys.stdout)
        freopen(log_file, 'a', sys.stderr)
    except IOError as e:
        shell.print_exception(e)
        sys.exit(1)


def daemon_stop(pid_file):
    """Read the pid file, SIGTERM the daemon, and wait for it to die.

    Treats a missing or empty pid file as "not running" (returns normally
    so the caller can exit 0). Exits non-zero on real errors or timeout.
    """
    import errno
    try:
        with open(pid_file) as f:
            buf = f.read()
            pid = common.to_str(buf)
            if not buf:
                logging.error('not running')
                # Fix: return here. Previously execution fell through to
                # int('') below and crashed with ValueError whenever the
                # pid file existed but was empty.
                return
    except IOError as e:
        shell.print_exception(e)
        if e.errno == errno.ENOENT:
            # always exit 0 if we are sure daemon is not running
            logging.error('not running')
            return
        sys.exit(1)
    pid = int(pid)
    if pid > 0:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            if e.errno == errno.ESRCH:
                logging.error('not running')
                # always exit 0 if we are sure daemon is not running
                return
            shell.print_exception(e)
            sys.exit(1)
    else:
        # NOTE: a non-positive pid falls through to the poll loop below and
        # will exit 1 after the timeout.
        logging.error('pid is not positive: %d', pid)

    # sleep for maximum 10s
    for i in range(0, 200):
        try:
            # query for the pid
            os.kill(pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                break
        time.sleep(0.05)
    else:
        logging.error('timed out when stopping pid %d', pid)
        sys.exit(1)
    print('stopped')
    os.unlink(pid_file)


def set_user(username):
    """Drop privileges to *username* (uid, gid and supplementary groups).

    No-op when username is None or we already run as that uid.
    """
    if username is None:
        return

    import pwd
    import grp

    try:
        pwrec = pwd.getpwnam(username)
    except KeyError:
        logging.error('user not found: %s' % username)
        raise
    user = pwrec[0]
    uid = pwrec[2]
    gid = pwrec[3]

    cur_uid = os.getuid()
    if uid == cur_uid:
        return
    if cur_uid != 0:
        logging.error('can not set user as nonroot user')
        # will raise later

    # inspired by supervisor
    if hasattr(os, 'setgroups'):
        groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
        groups.insert(0, gid)
        os.setgroups(groups)
    os.setgid(gid)
    os.setuid(uid)
apache-2.0
oscaro/django
django/core/management/commands/compilemessages.py
53
4119
from __future__ import unicode_literals import codecs import os from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import find_command, popen_wrapper from django.utils._os import npath, upath def has_bom(fn): with open(fn, 'rb') as f: sample = f.read(4) return sample[:3] == b'\xef\xbb\xbf' or \ sample.startswith(codecs.BOM_UTF16_LE) or \ sample.startswith(codecs.BOM_UTF16_BE) def is_writable(path): # Known side effect: updating file access/modified time to current time if # it is writable. try: with open(path, 'a'): os.utime(path, None) except (IOError, OSError): return False return True class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option('--locale', '-l', dest='locale', action='append', help='locale(s) to process (e.g. de_AT). Default is to process all. Can be used multiple times.'), ) help = 'Compiles .po files to .mo files for use with builtin gettext support.' requires_system_checks = False leave_locale_alone = True program = 'msgfmt' program_options = ['--check-format'] def handle(self, **options): locale = options.get('locale') self.verbosity = int(options.get('verbosity')) if find_command(self.program) is None: raise CommandError("Can't find %s. Make sure you have GNU gettext " "tools 0.15 or newer installed." % self.program) basedirs = [os.path.join('conf', 'locale'), 'locale'] if os.environ.get('DJANGO_SETTINGS_MODULE'): from django.conf import settings basedirs.extend([upath(path) for path in settings.LOCALE_PATHS]) # Gather existing directories. 
basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs))) if not basedirs: raise CommandError("This script should be run from the Django Git " "checkout or your project or app tree, or with " "the settings module specified.") for basedir in basedirs: if locale: dirs = [os.path.join(basedir, l, 'LC_MESSAGES') for l in locale] else: dirs = [basedir] locations = [] for ldir in dirs: for dirpath, dirnames, filenames in os.walk(ldir): locations.extend((dirpath, f) for f in filenames if f.endswith('.po')) if locations: self.compile_messages(locations) def compile_messages(self, locations): """ Locations is a list of tuples: [(directory, file), ...] """ for i, (dirpath, f) in enumerate(locations): if self.verbosity > 0: self.stdout.write('processing file %s in %s\n' % (f, dirpath)) po_path = os.path.join(dirpath, f) if has_bom(po_path): raise CommandError("The %s file has a BOM (Byte Order Mark). " "Django only supports .po files encoded in " "UTF-8 and without any BOM." % po_path) base_path = os.path.splitext(po_path)[0] # Check writability on first location if i == 0 and not is_writable(npath(base_path + '.mo')): self.stderr.write("The po files under %s are in a seemingly not " "writable location. mo files will not be updated/created." % dirpath) return args = [self.program] + self.program_options + ['-o', npath(base_path + '.mo'), npath(base_path + '.po')] output, errors, status = popen_wrapper(args) if status: if errors: msg = "Execution of %s failed: %s" % (self.program, errors) else: msg = "Execution of %s failed" % self.program raise CommandError(msg)
bsd-3-clause
shadowmint/nwidget
lib/pyglet-1.4.4/pyglet/window/mouse.py
43
2458
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of pyglet nor the names of its
#    contributors may be used to endorse or promote products
#    derived from this software without specific prior written
#    permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------

'''Mouse constants and utilities for pyglet.window.
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id$'

# Symbolic names for the mouse buttons
LEFT = 1 << 0
MIDDLE = 1 << 1
RIGHT = 1 << 2

# (bitmask, label) pairs in the order labels are reported.
_BUTTON_NAMES = ((LEFT, 'LEFT'), (MIDDLE, 'MIDDLE'), (RIGHT, 'RIGHT'))


def buttons_string(buttons):
    '''Return a string describing a set of active mouse buttons.

    Example::

        >>> buttons_string(LEFT | RIGHT)
        'LEFT|RIGHT'

    :Parameters:
        `buttons` : int
            Bitwise combination of mouse button constants.

    :rtype: str
    '''
    pressed = [name for bit, name in _BUTTON_NAMES if buttons & bit]
    return '|'.join(pressed)
apache-2.0
bpsinc-native/src_third_party_pywebsocket_src
example/abort_handshake_wsh.py
465
1781
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from mod_pywebsocket import handshake


def web_socket_do_extra_handshake(request):
    """Unconditionally abort the opening handshake.

    This example handler exists to exercise the server's handling of
    AbortedByUserException raised during the extra-handshake stage.
    """
    error = handshake.AbortedByUserException(
        "Aborted in web_socket_do_extra_handshake")
    raise error


def web_socket_transfer_data(request):
    """No-op: the handshake handler above always aborts first."""


# vi:sts=4 sw=4 et
bsd-3-clause
redhat-openstack/nova
nova/tests/test_test_utils.py
30
2511
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import errno
import socket
import tempfile

import fixtures

from nova import db
from nova import test
from nova.tests import utils as test_utils


class TestUtilsTestCase(test.TestCase):
    """Sanity checks for the helpers in nova.tests.utils."""

    def test_get_test_admin_context(self):
        # The context returned by get_test_admin_context must behave like an
        # admin context.
        ctxt = test_utils.get_test_admin_context()
        # TODO(soren): This should verify the full interface context
        # objects expose.
        self.assertTrue(ctxt.is_admin)

    def test_get_test_instance(self):
        # get_test_instance must hand back something that the DB layer
        # accepts as an instance_ref.
        instance_ref = test_utils.get_test_instance()
        ctxt = test_utils.get_test_admin_context()
        db.instance_get(ctxt, instance_ref['id'])

    def _test_get_test_network_info(self):
        """Does the return value match a real network_info structure."""
        # The challenge here is to define what exactly such a structure
        # must look like.
        pass

    def test_ipv6_supported(self):
        self.assertIn(test_utils.is_ipv6_supported(), (False, True))

        def broken_open(path):
            raise IOError

        def socket_no_ipv6(x, y):
            err = socket.error()
            err.errno = errno.EAFNOSUPPORT
            raise err

        def socket_with_ipv6(x, y):
            return tempfile.TemporaryFile()

        # Socket creation failing with EAFNOSUPPORT means no IPv6.
        with fixtures.MonkeyPatch('socket.socket', socket_no_ipv6):
            self.assertFalse(test_utils.is_ipv6_supported())

        # A usable socket is enough on non-Linux platforms.
        with fixtures.MonkeyPatch('socket.socket', socket_with_ipv6):
            with fixtures.MonkeyPatch('sys.platform', 'windows'):
                self.assertTrue(test_utils.is_ipv6_supported())

        # On Linux the proc file must also be readable; a failing open()
        # means IPv6 is reported as unsupported.
        with fixtures.MonkeyPatch('sys.platform', 'linux2'):
            with fixtures.MonkeyPatch('__builtin__.open', broken_open):
                self.assertFalse(test_utils.is_ipv6_supported())
apache-2.0
mysociety/barnetplanning
applications/migrations/0002_auto__add_field_application_ward_mapit_id.py
1
1626
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Add the nullable integer column ``ward_mapit_id`` to Application."""

    def forwards(self, orm):
        # Adding field 'Application.ward_mapit_id'
        ward_field = self.gf('django.db.models.fields.IntegerField')(null=True)
        db.add_column('applications_application', 'ward_mapit_id',
                      ward_field, keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Application.ward_mapit_id'
        db.delete_column('applications_application', 'ward_mapit_id')

    # South's frozen model state after this migration (data, do not edit).
    models = {
        'applications.application': {
            'Meta': {'object_name': 'Application'},
            'address': ('django.db.models.fields.TextField', [], {}),
            'council_reference': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'info_url': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'received': ('django.db.models.fields.DateField', [], {}),
            'ward_mapit_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        }
    }

    complete_apps = ['applications']
agpl-3.0
brightinteractive/cmsplugin-filer
cmsplugin_filer_link/migrations_django/0001_initial.py
13
1682
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import filer.fields.file
import cms.models.fields
from cmsplugin_filer_link.models import LINK_STYLES


class Migration(migrations.Migration):
    """Initial schema: create the FilerLinkPlugin model."""

    dependencies = [
        ('cms', '0003_auto_20140926_2347'),
        ('filer', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='FilerLinkPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('name', models.CharField(max_length=255, verbose_name='name')),
                ('url', models.CharField(max_length=255, null=True, verbose_name='url', blank=True)),
                ('mailto', models.EmailField(help_text='An email address has priority over both pages and urls', max_length=75, null=True, verbose_name='mailto', blank=True)),
                ('link_style', models.CharField(max_length=255, verbose_name='link style', default=LINK_STYLES[0][0], choices=LINK_STYLES)),
                ('new_window', models.BooleanField(default=False, help_text='Do you want this link to open a new window?', verbose_name='new window?')),
                ('file', filer.fields.file.FilerFileField(blank=True, to='filer.File', null=True)),
                ('page_link', cms.models.fields.PageField(blank=True, to='cms.Page', help_text='A link to a page has priority over urls.', null=True, verbose_name='page')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
bsd-3-clause
DanielFSFE/test1
adventure.py
1
13152
import random


class Game(object):
    """Holds all information for a game.

    Later it may be possible to run several game instances at once.
    """
    number = 0  # class-level counter of created games

    def __init__(self):
        self.number = Game.number
        Game.number += 1
        self.rooms = {}     # dictionary of rooms, key is room number
        self.items = {}     # dictionary of items, key is item number
        self.monsters = {}  # dictionary of monsters, key is m. number
        self.effects = {}   # dictionary of effects, key is effect name


class Player(object):
    """A player walking through the rooms of one Game instance."""
    number = 0

    def __init__(self, game, where=0, name="hero"):
        """need game object, like the room class"""
        self.number = Player.number
        Player.number += 1
        self.name = name
        self.inventory = []  # list of itemnumbers (player carry items)
        self.maxcarry = 100  # kg
        self.carry = 0       # kg
        self.where = where   # start room number


class Item(object):
    """A carryable item; registers itself in game.items on creation."""
    number = 0

    def __init__(self, game, description="", mass=0):
        self.number = Item.number
        Item.number += 1
        self.effect = None
        # Default: random mass between 1 and 50 kg (round() on an int was a
        # no-op and has been dropped).
        if mass == 0:
            mass = random.randint(1, 50)
        self.mass = mass
        self.description = description
        if self.description == "":
            self.description = random.choice(("helmet", "chestplate", "pants",
                "shoes", "potion of instant healing", "potion of strenght",
                "potion of speed", "potion of regeneration", "gold", "sword",
                "bow", "arrows", "shield", "teleport pill"))
        if self.description == "teleport pill":
            self.effect = "teleport"
            # Bug fix: the original reset self.description back to the (empty)
            # argument here, which produced a nameless item whenever the
            # random pick was a teleport pill. The pill now keeps its name.
        game.items[self.number] = self  # add item into game dict

    def info(self):
        """Return a one-line, human readable description of this item."""
        txt = "Item Number {}: ".format(self.number)
        txt += self.description + "\n"
        return txt


class Monster(object):
    """A monster; registers itself in game.monsters on creation."""
    number = 0

    def __init__(self, game, adjective="", description="", boss=False):
        self.number = Monster.number
        Monster.number += 1
        game.monsters[self.number] = self  # add monster into game dict
        self.adjective = adjective
        self.description = description
        self.hitpoints = random.randint(5, 15)
        if description == "":
            if boss:
                self.adjective = random.choice((" deadly", " dangerous",
                    " creepy", " ugly", " killer"))
                self.description = random.choice(("Unicorn", "Cat",
                    "Teddy Bear", "Hamster", "Rabbit"))
                self.hitpoints *= 5  # bosses are five times as tough
            else:
                self.description = random.choice(("goblin", "ork", "troll",
                    "mice", "rat", "dwarf", "cave drake"))

    def info(self):
        """Return a one-line, human readable description of this monster."""
        txt = "Monster number {}: {} {} with {} hitpoints\n".format(
            self.number, self.adjective, self.description, self.hitpoints)
        return txt


class Effect(object):
    """A named effect (teleport etc.); registers itself in game.effects."""

    def __init__(self, g, effectname, description="", roomnumber=-1,
                 affectplayer=True, summonalies=0, summonenemy=0,
                 teleport=-1, summonitem=-1, destroyitem=0,
                 highweight=0, lowweight=0, healplayer=0,
                 damageplayer=0, killenemy=0):
        self.effectname = effectname
        self.roomnumber = roomnumber
        self.description = description
        self.affectplayer = affectplayer
        self.summonalies = summonalies
        self.summonenemy = summonenemy  # bug fix: was misspelled 'sommonenemy'
        self.teleport = teleport
        self.summonitem = summonitem
        self.destroyitem = destroyitem
        self.highweight = highweight
        self.lowweight = lowweight
        self.healplayer = healplayer
        self.damageplayer = damageplayer
        self.killenemy = killenemy
        g.effects[self.effectname] = self

    def action(self, g, p):
        """g = Game p = Player"""
        print("The effect does his job")
        if self.teleport != -1:
            # Bug fix: g.rooms is a dict and random.choice() needs a
            # sequence, so pick from the list of room numbers instead.
            while True:
                target = random.choice(list(g.rooms))
                if target == 4:  # never teleport into the game-over room
                    continue
                break
            p.where = target


class Room(object):
    """A room with items, monsters and connections to other rooms."""
    number = 0

    def __init__(self, game, description="", connections=None,
                 itemchances=None, monsterchances=None,
                 bosschances=None, explored=False):
        """need game instance"""
        # Bug fix: the chance/connection lists used mutable default
        # arguments, so every room created with defaults shared the same
        # list objects. None-sentinels restore per-instance lists.
        if connections is None:
            connections = []
        if itemchances is None:
            itemchances = [0.5, 0.25, 0.1]
        if monsterchances is None:
            monsterchances = [0.3, 0.2, 0.1, 0.05]
        if bosschances is None:
            bosschances = [0.0]
        self.number = Room.number
        game.rooms[self.number] = self  # add room into game dict
        Room.number += 1
        self.explored = explored  # True or False
        self.description = description
        self.connections = connections
        self.itemchances = itemchances
        self.monsterchances = monsterchances
        self.bosschances = bosschances
        self.effect = random.randint(1, 100)
        # create items (one roll per chance entry)
        self.itemnumbers = []  # list of indexes of items in this room
        for chance in self.itemchances:
            if random.random() < chance:
                new_item = Item(game)
                self.itemnumbers.append(new_item.number)  # add reference
        # create monsters and bosses (one roll per chance entry)
        self.monsternumbers = []  # list of indexes of monsters in this room
        for chance in self.monsterchances:
            if random.random() < chance:
                new_monster = Monster(game)
                self.monsternumbers.append(new_monster.number)
        for chance in self.bosschances:
            if random.random() < chance:
                new_monster = Monster(game, boss=True)
                self.monsternumbers.append(new_monster.number)

    def info(self, game):
        """return string with all information about this room"""
        txt = "Room number {}: ".format(self.number)
        txt += self.description + "\n"
        # items ?
        if len(self.itemnumbers) > 0:
            # typo fix in output: was 'itmes'
            txt += "You see {} items here: \n".format(len(self.itemnumbers))
            for i in self.itemnumbers:
                txt += game.items[i].info()
        else:
            txt += "This room has no items\n"
        # monsters ?
        if len(self.monsternumbers) > 0:
            txt += "You see {} monster(s) here:\n".format(
                len(self.monsternumbers))
            for m in self.monsternumbers:
                txt += game.monsters[m].info()
        else:
            txt += "No monsters in this room, fortunately.\n"
        # doors
        txt += "You see {} door(s).\n".format(len(self.connections))
        txt += "\n"
        return txt


def output(txt):
    """can be later replaced by gui or graphical output"""
    print(txt)


def select_number(list_of_numbers):
    """The player select *one* number of a list of numbers"""
    answer = ""
    while (not answer.isdecimal()) or (int(answer) not in list_of_numbers):
        answer = input("Please type selected number and press ENTER: ")
    return int(answer)


def _select_item(numbers, game):
    """List the item numbers in *numbers* and return the player's choice.

    Returns the selected item number, or None on bad input.
    """
    for i in numbers:
        output("")
        print(i, "...........", game.items[i].description)
    output("")
    output("Please type selected number and press ENTER")
    try:
        # bug fix: a bare 'except:' also swallowed KeyboardInterrupt
        return int(input(">>> "))
    except ValueError:
        output("")
        output("Wrong input")
        return None


def show_inventory(game, player):
    """Print every carried item and the used carrying capacity."""
    output("\n\n")
    output("==== Your inventory ====")
    output("")
    output("number, description, mass (kg)")
    output("-------------------------")
    output("")
    tmpmass = 0.0
    for i in player.inventory:
        output("{}...{}...{}".format(i, game.items[i].description,
                                     game.items[i].mass))
        tmpmass += game.items[i].mass
    output("")
    output("You're currently carrying {} kg, that is {:.2f}% of your capacity".format(
        tmpmass, (tmpmass / player.maxcarry) * 100))


def drop_item(game, player):
    """Drop one selected inventory item into the current room."""
    selection = _select_item(player.inventory, game)
    # Bug fix: validate the selection; removing an arbitrary number used to
    # raise ValueError.
    if selection is not None and selection in player.inventory:
        player.inventory.remove(selection)
        game.rooms[player.where].itemnumbers.append(selection)


def pickup_item(game, player):
    """Pick up one selected room item if the player can carry it."""
    room = game.rooms[player.where]
    selection = _select_item(room.itemnumbers, game)
    if selection is None:
        return
    # Bug fix: reject numbers of items that are not in this room; looking
    # them up blindly used to raise KeyError / corrupt the item lists.
    if selection not in room.itemnumbers:
        output("")
        output("Wrong input")
        return
    # can player carry this item
    carried = sum(game.items[j].mass for j in player.inventory)
    if carried + game.items[selection].mass < player.maxcarry:
        player.inventory.append(selection)
        room.itemnumbers.remove(selection)
    else:
        output("")
        output("You're carrying too much!")
        output("You have to drop something to carry more stuff!")


def use_item(game, player):
    """Consume one selected inventory item."""
    selection = _select_item(player.inventory, game)
    if selection is not None and selection in player.inventory:
        # TODO: apply the item's effect before consuming it; currently the
        # item is only removed (matches original behavior).
        player.inventory.remove(selection)


def nextAction(game, player):
    """ask the user to select only one of many options

    Returns the number of the room the player is in afterwards (a new room
    for a movement, the same room for any other action).
    """
    output("What do you want to do today ?")
    connections = game.rooms[player.where].connections
    names = []  # temp list of room names
    for c in connections:
        if game.rooms[c].explored:
            names.append(game.rooms[c].description)
        else:
            names.append("unknown room")
    output("0.........other actions")
    for pos, name in enumerate(names, 1):  # menu entries start at 1
        output("{}.........{}".format(pos, name))
    answer = select_number(range(len(names) + 1))
    if answer != 0:
        return connections[answer - 1]  # return new room number
    # other menu options, player remains in the same room
    output("")
    output("What do you want to do today?")
    actions = {"d": "drop item",
               "i": "inspect inventory",
               "p": "pick up item",
               "u": "use item",
               "c": "cancel"}
    for a in actions:
        output("{}....{}".format(a, actions[a]))
    answer = ""
    while answer not in actions:
        answer = input("please type selected letter and press ENTER: ")
    if answer == "i":
        show_inventory(game, player)
    elif answer == "d":
        drop_item(game, player)
    elif answer == "p":
        pickup_item(game, player)
    elif answer == "u":
        use_item(game, player)
    return player.where  # return the same room number


# create a game instance
g = Game()

# add rooms with description and connections.
# Each room will have a unique number and add himself to game
# room number 0
Room(g, "starting lobby", [1, 4], explored=True)
# room number 1
Room(g, "first room", [0, 2, 6])
# room number 2
Room(g, "storage room", [1, 5, 7])
# room number 3
# the boss room has 1 to 6 minions and 1 to 3 bosses
Room(g, "boss chamber", [6], monsterchances=[1.0, 0.9, 0.8, 0.5, 0.5, 0.5],
     bosschances=[1.0, 0.15, 0.05])
# room number 4
Room(g, "end of the world (game over)", [], explored=True)
# room number 5
Room(g, "npc room", [2, 9, 10])
# room number 6
Room(g, "gear room", [1, 3, 10])
# room number 7
Room(g, "trader", [2, 5, 8])
# room number 8
Room(g, "enemy room", [3, 7],
     monsterchances=[1.0, 1.0, 1.0, 0.9, 0.7, 0.5, 0.2])
# room number 9
Room(g, "empty room", [5, 12], itemchances=[])
# room number 10
Room(g, "mini boss", [5, 6], monsterchances=[1.0, 0.5, 0.5, 0.5],
     bosschances=[0.5])
# room number 11
Room(g, "random room", [10, 12])
# room number 12
Room(g, "random room", [11, 9])

# items
i = Item(g, "potion of instant healing", mass=0.25)
g.rooms[6].itemnumbers.append(i.number)  # puts item i in room 6
i = Item(g, "wheel of cheese", mass=0.50)
g.rooms[2].itemnumbers.append(i.number)

# add effects
e = Effect(g, "teleport", teleport=1)
e.description = "You wake up in a strange room"

# start player in lobby (room 0)
p = Player(g, where=0)  # create player in room 0

# main loop: room 4 has no connections, entering it ends the game
while len(g.rooms[p.where].connections) > 0:
    if not g.rooms[p.where].explored:
        output("You explore a new room!")
        g.rooms[p.where].explored = True  # explore this room
    output("\n\nYou are now here:\n\n{}".format(g.rooms[p.where].info(g)))
    p.where = nextAction(g, p)

output("\n")
output("\n")
output("Thank you for playing. Have a nice real life")
gpl-2.0
citrix-openstack-build/ceilometer
ceilometer/openstack/common/rpc/securemessage.py
3
18050
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#    Copyright 2013 Red Hat, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Secure messaging (signing and optional encryption) for RPC envelopes.

Configuration options, and the exception hierarchy raised by the secure
messaging layer, live in this module alongside the SecureMessage machinery.
"""

import base64
import collections
import os
import struct
import time

import requests

from oslo.config import cfg

from ceilometer.openstack.common.crypto import utils as cryptoutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging


# Options registered under the [secure_messages] group (see
# secure_message_group below).  'secret_keys_file' wins over 'secret_key'
# when both are set.
secure_message_opts = [
    cfg.BoolOpt('enabled', default=True,
                help='Whether Secure Messaging (Signing) is enabled,'
                     ' defaults to enabled'),
    cfg.BoolOpt('enforced', default=False,
                help='Whether Secure Messaging (Signing) is enforced,'
                     ' defaults to not enforced'),
    cfg.BoolOpt('encrypt', default=False,
                help='Whether Secure Messaging (Encryption) is enabled,'
                     ' defaults to not enabled'),
    cfg.StrOpt('secret_keys_file',
               help='Path to the file containing the keys, takes precedence'
                    ' over secret_key'),
    cfg.MultiStrOpt('secret_key',
                    help='A list of keys: (ex: name:<base64 encoded key>),'
                         ' ignored if secret_keys_file is set'),
    cfg.StrOpt('kds_endpoint',
               help='KDS endpoint (ex: http://kds.example.com:35357/v3)'),
]
secure_message_group = cfg.OptGroup('secure_messages',
                                    title='Secure Messaging options')

LOG = logging.getLogger(__name__)


# --- Exception hierarchy -------------------------------------------------
# All secure-messaging errors derive from SecureMessageException so callers
# can catch the whole family with a single except clause.

class SecureMessageException(Exception):
    """Generic Exception for Secure Messages."""

    # Subclasses override 'msg' (usually a %-format template) and pass the
    # interpolated string up through __init__.
    msg = "An unknown Secure Message related exception occurred."

    def __init__(self, msg=None):
        if msg is None:
            msg = self.msg
        super(SecureMessageException, self).__init__(msg)


class SharedKeyNotFound(SecureMessageException):
    """No shared key was found and no other external authentication mechanism
    is available.
    """

    msg = "Shared Key for [%s] Not Found. (%s)"

    def __init__(self, name, errmsg):
        super(SharedKeyNotFound, self).__init__(self.msg % (name, errmsg))


class InvalidMetadata(SecureMessageException):
    """The metadata is invalid."""

    msg = "Invalid metadata: %s"

    def __init__(self, err):
        super(InvalidMetadata, self).__init__(self.msg % err)


class InvalidSignature(SecureMessageException):
    """Signature validation failed."""

    msg = "Failed to validate signature (source=%s, destination=%s)"

    def __init__(self, src, dst):
        super(InvalidSignature, self).__init__(self.msg % (src, dst))


class UnknownDestinationName(SecureMessageException):
    """The Destination name is unknown to us."""

    msg = "Invalid destination name (%s)"

    def __init__(self, name):
        super(UnknownDestinationName, self).__init__(self.msg % name)


class InvalidEncryptedTicket(SecureMessageException):
    """The Encrypted Ticket could not be successfully handled."""

    msg = "Invalid Ticket (source=%s, destination=%s)"

    def __init__(self, src, dst):
        super(InvalidEncryptedTicket, self).__init__(self.msg % (src, dst))


class InvalidExpiredTicket(SecureMessageException):
    """The ticket received is already expired."""

    msg = "Expired ticket (source=%s, destination=%s)"

    def __init__(self, src, dst):
        super(InvalidExpiredTicket, self).__init__(self.msg % (src, dst))


class CommunicationError(SecureMessageException):
    """The Communication with the KDS failed."""

    msg = "Communication Error (target=%s): %s"

    def __init__(self, target, errmsg):
        super(CommunicationError, self).__init__(self.msg % (target, errmsg))


class InvalidArgument(SecureMessageException):
    """Bad initialization argument."""

    msg = "Invalid argument: %s"

    def __init__(self, errmsg):
        super(InvalidArgument, self).__init__(self.msg % errmsg)
Ticket = collections.namedtuple('Ticket', ['skey', 'ekey', 'esek']) class KeyStore(object): """A storage class for Signing and Encryption Keys. This class creates an object that holds Generic Keys like Signing Keys, Encryption Keys, Encrypted SEK Tickets ... """ def __init__(self): self._kvps = dict() def _get_key_name(self, source, target, ktype): return (source, target, ktype) def _put(self, src, dst, ktype, expiration, data): name = self._get_key_name(src, dst, ktype) self._kvps[name] = (expiration, data) def _get(self, src, dst, ktype): name = self._get_key_name(src, dst, ktype) if name in self._kvps: expiration, data = self._kvps[name] if expiration > time.time(): return data else: del self._kvps[name] return None def clear(self): """Wipes the store clear of all data.""" self._kvps.clear() def put_ticket(self, source, target, skey, ekey, esek, expiration): """Puts a sek pair in the cache. :param source: Client name :param target: Target name :param skey: The Signing Key :param ekey: The Encription Key :param esek: The token encrypted with the target key :param expiration: Expiration time in seconds since Epoch """ keys = Ticket(skey, ekey, esek) self._put(source, target, 'ticket', expiration, keys) def get_ticket(self, source, target): """Returns a Ticket (skey, ekey, esek) namedtuple for the source/target pair. 
""" return self._get(source, target, 'ticket') _KEY_STORE = KeyStore() class _KDSClient(object): USER_AGENT = 'oslo-incubator/rpc' def __init__(self, endpoint=None, timeout=None): """A KDS Client class.""" self._endpoint = endpoint if timeout is not None: self.timeout = float(timeout) else: self.timeout = None def _do_get(self, url, request): req_kwargs = dict() req_kwargs['headers'] = dict() req_kwargs['headers']['User-Agent'] = self.USER_AGENT req_kwargs['headers']['Content-Type'] = 'application/json' req_kwargs['data'] = jsonutils.dumps({'request': request}) if self.timeout is not None: req_kwargs['timeout'] = self.timeout try: resp = requests.get(url, **req_kwargs) except requests.ConnectionError as e: err = "Unable to establish connection. %s" % e raise CommunicationError(url, err) return resp def _get_reply(self, url, resp): if resp.text: try: body = jsonutils.loads(resp.text) reply = body['reply'] except (KeyError, TypeError, ValueError): msg = "Failed to decode reply: %s" % resp.text raise CommunicationError(url, msg) else: msg = "No reply data was returned." raise CommunicationError(url, msg) return reply def _get_ticket(self, request, url=None, redirects=10): """Send an HTTP request. Wraps around 'requests' to handle redirects and common errors. """ if url is None: if not self._endpoint: raise CommunicationError(url, 'Endpoint not configured') url = self._endpoint + '/kds/ticket' while redirects: resp = self._do_get(url, request) if resp.status_code in (301, 302, 305): # Redirected. Reissue the request to the new location. 
url = resp.headers['location'] redirects -= 1 continue elif resp.status_code != 200: msg = "Request returned failure status: %s (%s)" err = msg % (resp.status_code, resp.text) raise CommunicationError(url, err) return self._get_reply(url, resp) raise CommunicationError(url, "Too many redirections, giving up!") def get_ticket(self, source, target, crypto, key): # prepare metadata md = {'requestor': source, 'target': target, 'timestamp': time.time(), 'nonce': struct.unpack('Q', os.urandom(8))[0]} metadata = base64.b64encode(jsonutils.dumps(md)) # sign metadata signature = crypto.sign(key, metadata) # HTTP request reply = self._get_ticket({'metadata': metadata, 'signature': signature}) # verify reply signature = crypto.sign(key, (reply['metadata'] + reply['ticket'])) if signature != reply['signature']: raise InvalidEncryptedTicket(md['source'], md['destination']) md = jsonutils.loads(base64.b64decode(reply['metadata'])) if ((md['source'] != source or md['destination'] != target or md['expiration'] < time.time())): raise InvalidEncryptedTicket(md['source'], md['destination']) # return ticket data tkt = jsonutils.loads(crypto.decrypt(key, reply['ticket'])) return tkt, md['expiration'] # we need to keep a global nonce, as this value should never repeat non # matter how many SecureMessage objects we create _NONCE = None def _get_nonce(): """We keep a single counter per instance, as it is so huge we can't possibly cycle through within 1/100 of a second anyway. """ global _NONCE # Lazy initialize, for now get a random value, multiply by 2^32 and # use it as the nonce base. The counter itself will rotate after # 2^32 increments. if _NONCE is None: _NONCE = [struct.unpack('I', os.urandom(4))[0], 0] # Increment counter and wrap at 2^32 _NONCE[1] += 1 if _NONCE[1] > 0xffffffff: _NONCE[1] = 0 # Return base + counter return long((_NONCE[0] * 0xffffffff)) + _NONCE[1] class SecureMessage(object): """A Secure Message object. 
This class creates a signing/encryption facility for RPC messages. It encapsulates all the necessary crypto primitives to insulate regular code from the intricacies of message authentication, validation and optionally encryption. :param topic: The topic name of the queue :param host: The server name, together with the topic it forms a unique name that is used to source signing keys, and verify incoming messages. :param conf: a ConfigOpts object :param key: (optional) explicitly pass in endpoint private key. If not provided it will be sourced from the service config :param key_store: (optional) Storage class for local caching :param encrypt: (defaults to False) Whether to encrypt messages :param enctype: (defaults to AES) Cipher to use :param hashtype: (defaults to SHA256) Hash function to use for signatures """ def __init__(self, topic, host, conf, key=None, key_store=None, encrypt=None, enctype='AES', hashtype='SHA256'): conf.register_group(secure_message_group) conf.register_opts(secure_message_opts, group='secure_messages') self._name = '%s.%s' % (topic, host) self._key = key self._conf = conf.secure_messages self._encrypt = self._conf.encrypt if (encrypt is None) else encrypt self._crypto = cryptoutils.SymmetricCrypto(enctype, hashtype) self._hkdf = cryptoutils.HKDF(hashtype) self._kds = _KDSClient(self._conf.kds_endpoint) if self._key is None: self._key = self._init_key(topic, self._name) if self._key is None: err = "Secret Key (or key file) is missing or malformed" raise SharedKeyNotFound(self._name, err) self._key_store = key_store or _KEY_STORE def _init_key(self, topic, name): keys = None if self._conf.secret_keys_file: with open(self._conf.secret_keys_file, 'r') as f: keys = f.readlines() elif self._conf.secret_key: keys = self._conf.secret_key if keys is None: return None for k in keys: if k[0] == '#': continue if ':' not in k: break svc, key = k.split(':', 1) if svc == topic or svc == name: return base64.b64decode(key) return None def _split_key(self, 
key, size): sig_key = key[:size] enc_key = key[size:] return sig_key, enc_key def _decode_esek(self, key, source, target, timestamp, esek): """This function decrypts the esek buffer passed in and returns a KeyStore to be used to check and decrypt the received message. :param key: The key to use to decrypt the ticket (esek) :param source: The name of the source service :param traget: The name of the target service :param timestamp: The incoming message timestamp :param esek: a base64 encoded encrypted block containing a JSON string """ rkey = None try: s = self._crypto.decrypt(key, esek) j = jsonutils.loads(s) rkey = base64.b64decode(j['key']) expiration = j['timestamp'] + j['ttl'] if j['timestamp'] > timestamp or timestamp > expiration: raise InvalidExpiredTicket(source, target) except Exception: raise InvalidEncryptedTicket(source, target) info = '%s,%s,%s' % (source, target, str(j['timestamp'])) sek = self._hkdf.expand(rkey, info, len(key) * 2) return self._split_key(sek, len(key)) def _get_ticket(self, target): """This function will check if we already have a SEK for the specified target in the cache, or will go and try to fetch a new SEK from the key server. :param target: The name of the target service """ ticket = self._key_store.get_ticket(self._name, target) if ticket is not None: return ticket tkt, expiration = self._kds.get_ticket(self._name, target, self._crypto, self._key) self._key_store.put_ticket(self._name, target, base64.b64decode(tkt['skey']), base64.b64decode(tkt['ekey']), tkt['esek'], expiration) return self._key_store.get_ticket(self._name, target) def encode(self, version, target, json_msg): """This is the main encoding function. It takes a target and a message and returns a tuple consisting of a JSON serialized metadata object, a JSON serialized (and optionally encrypted) message, and a signature. 
:param version: the current envelope version :param target: The name of the target service (usually with hostname) :param json_msg: a serialized json message object """ ticket = self._get_ticket(target) metadata = jsonutils.dumps({'source': self._name, 'destination': target, 'timestamp': time.time(), 'nonce': _get_nonce(), 'esek': ticket.esek, 'encryption': self._encrypt}) message = json_msg if self._encrypt: message = self._crypto.encrypt(ticket.ekey, message) signature = self._crypto.sign(ticket.skey, version + metadata + message) return (metadata, message, signature) def decode(self, version, metadata, message, signature): """This is the main decoding function. It takes a version, metadata, message and signature strings and returns a tuple with a (decrypted) message and metadata or raises an exception in case of error. :param version: the current envelope version :param metadata: a JSON serialized object with metadata for validation :param message: a JSON serialized (base64 encoded encrypted) message :param signature: a base64 encoded signature """ md = jsonutils.loads(metadata) check_args = ('source', 'destination', 'timestamp', 'nonce', 'esek', 'encryption') for arg in check_args: if arg not in md: raise InvalidMetadata('Missing metadata "%s"' % arg) if md['destination'] != self._name: # TODO(simo) handle group keys by checking target raise UnknownDestinationName(md['destination']) try: skey, ekey = self._decode_esek(self._key, md['source'], md['destination'], md['timestamp'], md['esek']) except InvalidExpiredTicket: raise except Exception: raise InvalidMetadata('Failed to decode ESEK for %s/%s' % ( md['source'], md['destination'])) sig = self._crypto.sign(skey, version + metadata + message) if sig != signature: raise InvalidSignature(md['source'], md['destination']) if md['encryption'] is True: msg = self._crypto.decrypt(ekey, message) else: msg = message return (md, msg)
apache-2.0
MIPS/external-chromium_org
net/tools/testserver/testserver.py
23
73598
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """This is a simple HTTP/FTP/TCP/UDP/BASIC_AUTH_PROXY/WEBSOCKET server used for testing Chrome. It supports several test URLs, as specified by the handlers in TestPageHandler. By default, it listens on an ephemeral port and sends the port number back to the originating process over a pipe. The originating process can specify an explicit port if necessary. It can use https if you specify the flag --https=CERT where CERT is the path to a pem file containing the certificate and private key that should be used. """ import base64 import BaseHTTPServer import cgi import hashlib import logging import minica import os import random import re import select import socket import SocketServer import struct import sys import threading import time import urllib import urlparse import zlib import echo_message import pyftpdlib.ftpserver import testserver_base import tlslite import tlslite.api BASE_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.insert( 0, os.path.join(BASE_DIR, '..', '..', '..', 'third_party/pywebsocket/src')) from mod_pywebsocket.standalone import WebSocketServer SERVER_HTTP = 0 SERVER_FTP = 1 SERVER_TCP_ECHO = 2 SERVER_UDP_ECHO = 3 SERVER_BASIC_AUTH_PROXY = 4 SERVER_WEBSOCKET = 5 # Default request queue size for WebSocketServer. 
# Default request queue size used when building WebSocketOptions below.
_DEFAULT_REQUEST_QUEUE_SIZE = 128


class WebSocketOptions:
  """Holds options for WebSocketServer."""

  def __init__(self, host, port, data_dir):
    self.request_queue_size = _DEFAULT_REQUEST_QUEUE_SIZE
    self.server_host = host
    self.port = port
    # Handlers are loaded from the test data directory; no scan dir and no
    # handler map file are configured.
    self.websock_handlers = data_dir
    self.scan_dir = None
    self.allow_handlers_outside_root_dir = False
    self.websock_handlers_map_file = None
    self.cgi_directories = []
    self.is_executable_method = None
    self.allow_draft75 = False
    self.strict = True

    # TLS and auth are disabled by default; tests flip these as needed.
    self.use_tls = False
    self.private_key = None
    self.certificate = None
    self.tls_client_auth = False
    self.tls_client_ca = None
    self.use_basic_auth = False


class RecordingSSLSessionCache(object):
  """RecordingSSLSessionCache acts as a TLS session cache and maintains a log
  of lookups and inserts in order to test session cache behaviours."""

  def __init__(self):
    # List of ('lookup'|'insert', sessionID) tuples, in call order.
    self.log = []

  def __getitem__(self, sessionID):
    self.log.append(('lookup', sessionID))
    # Always miss so every lookup is followed by an insert.
    raise KeyError()

  def __setitem__(self, sessionID, session):
    self.log.append(('insert', sessionID))


class HTTPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 testserver_base.StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that adds client
  verification."""

  pass


class OCSPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 BaseHTTPServer.HTTPServer):
  """This is a specialization of HTTPServer that serves an OCSP response"""

  def serve_forever_on_thread(self):
    # Runs the server loop on a background thread so the main test server
    # can keep serving; stop_serving() joins this thread.
    self.thread = threading.Thread(target = self.serve_forever,
                                   name = "OCSPServerThread")
    self.thread.start()

  def stop_serving(self):
    self.shutdown()
    self.thread.join()


class HTTPSServer(tlslite.api.TLSSocketServerMixIn,
                  testserver_base.ClientRestrictingServerMixIn,
                  testserver_base.BrokenPipeHandlerMixIn,
                  testserver_base.StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that add https support and
  client verification."""

  # NOTE(review): 'request_hander_class' is misspelled (handler) but renaming
  # it would change the keyword interface for callers, so it is kept as-is.
  def __init__(self, server_address, request_hander_class, pem_cert_and_key,
               ssl_client_auth, ssl_client_cas, ssl_bulk_ciphers,
               record_resume_info, tls_intolerant):
    self.cert_chain = tlslite.api.X509CertChain().parseChain(pem_cert_and_key)
    # Force using only python implementation - otherwise behavior is different
    # depending on whether m2crypto Python module is present (error is thrown
    # when it is). m2crypto uses a C (based on OpenSSL) implementation under
    # the hood.
    self.private_key = tlslite.api.parsePEMKey(pem_cert_and_key,
                                               private=True,
                                               implementations=['python'])
    self.ssl_client_auth = ssl_client_auth
    self.ssl_client_cas = []
    self.tls_intolerant = tls_intolerant

    # Collect the subject of each client CA cert for certificate requests.
    for ca_file in ssl_client_cas:
      s = open(ca_file).read()
      x509 = tlslite.api.X509()
      x509.parse(s)
      self.ssl_client_cas.append(x509.subject)

    self.ssl_handshake_settings = tlslite.api.HandshakeSettings()
    if ssl_bulk_ciphers is not None:
      self.ssl_handshake_settings.cipherNames = ssl_bulk_ciphers

    if record_resume_info:
      # If record_resume_info is true then we'll replace the session cache with
      # an object that records the lookups and inserts that it sees.
      self.session_cache = RecordingSSLSessionCache()
    else:
      self.session_cache = tlslite.api.SessionCache()
    testserver_base.StoppableHTTPServer.__init__(self,
                                                 server_address,
                                                 request_hander_class)

  def handshake(self, tlsConnection):
    """Creates the SSL connection.

    Returns True on success, False on failure; presumably the tlslite
    mixin consumes this to decide whether to proceed with the request
    (TODO confirm against tlslite.api.TLSSocketServerMixIn).
    """
    try:
      self.tlsConnection = tlsConnection
      tlsConnection.handshakeServer(certChain=self.cert_chain,
                                    privateKey=self.private_key,
                                    sessionCache=self.session_cache,
                                    reqCert=self.ssl_client_auth,
                                    settings=self.ssl_handshake_settings,
                                    reqCAs=self.ssl_client_cas,
                                    tlsIntolerant=self.tls_intolerant)
      tlsConnection.ignoreAbruptClose = True
      return True
    except tlslite.api.TLSAbruptCloseError:
      # Ignore abrupt close.
      return True
    except tlslite.api.TLSError, error:
      print "Handshake failure:", str(error)
      return False


class FTPServer(testserver_base.ClientRestrictingServerMixIn,
                pyftpdlib.ftpserver.FTPServer):
  """This is a specialization of FTPServer that adds client verification."""

  pass


class TCPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.TCPServer):
  """A TCP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.TCPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Loop one request at a time until some handler sets self.stop.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()


class UDPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.UDPServer):
  """A UDP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.UDPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Same single-request loop as TCPEchoServer.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()


class TestPageHandler(testserver_base.BasePageHandler):
  # Class variables to allow for persistence state between page handler
  # invocations
  rst_limits = {}
  fail_precondition = {}

  def __init__(self, request, client_address, socket_server):
    connect_handlers = [
      self.RedirectConnectHandler,
      self.ServerAuthConnectHandler,
      self.DefaultConnectResponseHandler]
    get_handlers = [
      self.NoCacheMaxAgeTimeHandler,
      self.NoCacheTimeHandler,
      self.CacheTimeHandler,
      self.CacheExpiresHandler,
      self.CacheProxyRevalidateHandler,
      self.CachePrivateHandler,
      self.CachePublicHandler,
      self.CacheSMaxAgeHandler,
      self.CacheMustRevalidateHandler,
      self.CacheMustRevalidateMaxAgeHandler,
      self.CacheNoStoreHandler,
self.CacheNoStoreMaxAgeHandler, self.CacheNoTransformHandler, self.DownloadHandler, self.DownloadFinishHandler, self.EchoHeader, self.EchoHeaderCache, self.EchoAllHandler, self.ZipFileHandler, self.FileHandler, self.SetCookieHandler, self.SetManyCookiesHandler, self.ExpectAndSetCookieHandler, self.SetHeaderHandler, self.AuthBasicHandler, self.AuthDigestHandler, self.SlowServerHandler, self.ChunkedServerHandler, self.ContentTypeHandler, self.NoContentHandler, self.ServerRedirectHandler, self.ClientRedirectHandler, self.MultipartHandler, self.GetSSLSessionCacheHandler, self.SSLManySmallRecords, self.GetChannelID, self.CloseSocketHandler, self.RangeResetHandler, self.DefaultResponseHandler] post_handlers = [ self.EchoTitleHandler, self.EchoHandler, self.PostOnlyFileHandler] + get_handlers put_handlers = [ self.EchoTitleHandler, self.EchoHandler] + get_handlers head_handlers = [ self.FileHandler, self.DefaultResponseHandler] self._mime_types = { 'crx' : 'application/x-chrome-extension', 'exe' : 'application/octet-stream', 'gif': 'image/gif', 'jpeg' : 'image/jpeg', 'jpg' : 'image/jpeg', 'json': 'application/json', 'pdf' : 'application/pdf', 'wav' : 'audio/wav', 'xml' : 'text/xml' } self._default_mime_type = 'text/html' testserver_base.BasePageHandler.__init__(self, request, client_address, socket_server, connect_handlers, get_handlers, head_handlers, post_handlers, put_handlers) def GetMIMETypeFromName(self, file_name): """Returns the mime type for the specified file_name. So far it only looks at the file extension.""" (_shortname, extension) = os.path.splitext(file_name.split("?")[0]) if len(extension) == 0: # no extension. 
return self._default_mime_type # extension starts with a dot, so we need to remove it return self._mime_types.get(extension[1:], self._default_mime_type) def NoCacheMaxAgeTimeHandler(self): """This request handler yields a page with the title set to the current system time, and no caching requested.""" if not self._ShouldHandleRequest("/nocachetime/maxage"): return False self.send_response(200) self.send_header('Cache-Control', 'max-age=0') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def NoCacheTimeHandler(self): """This request handler yields a page with the title set to the current system time, and no caching requested.""" if not self._ShouldHandleRequest("/nocachetime"): return False self.send_response(200) self.send_header('Cache-Control', 'no-cache') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheTimeHandler(self): """This request handler yields a page with the title set to the current system time, and allows caching for one minute.""" if not self._ShouldHandleRequest("/cachetime"): return False self.send_response(200) self.send_header('Cache-Control', 'max-age=60') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheExpiresHandler(self): """This request handler yields a page with the title set to the current system time, and set the page to expire on 1 Jan 2099.""" if not self._ShouldHandleRequest("/cache/expires"): return False self.send_response(200) self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheProxyRevalidateHandler(self): """This request handler 
yields a page with the title set to the current system time, and allows caching for 60 seconds""" if not self._ShouldHandleRequest("/cache/proxy-revalidate"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=60, proxy-revalidate') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CachePrivateHandler(self): """This request handler yields a page with the title set to the current system time, and allows caching for 5 seconds.""" if not self._ShouldHandleRequest("/cache/private"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=3, private') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CachePublicHandler(self): """This request handler yields a page with the title set to the current system time, and allows caching for 5 seconds.""" if not self._ShouldHandleRequest("/cache/public"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=3, public') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheSMaxAgeHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow for caching.""" if not self._ShouldHandleRequest("/cache/s-maxage"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheMustRevalidateHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow caching.""" if not 
self._ShouldHandleRequest("/cache/must-revalidate"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'must-revalidate') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheMustRevalidateMaxAgeHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow caching event though max-age of 60 seconds is specified.""" if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=60, must-revalidate') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheNoStoreHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow the page to be stored.""" if not self._ShouldHandleRequest("/cache/no-store"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'no-store') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheNoStoreMaxAgeHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow the page to be stored even though max-age of 60 seconds is specified.""" if not self._ShouldHandleRequest("/cache/no-store/max-age"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=60, no-store') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheNoTransformHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow the content to transformed during user-agent 
caching""" if not self._ShouldHandleRequest("/cache/no-transform"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'no-transform') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def EchoHeader(self): """This handler echoes back the value of a specific request header.""" return self.EchoHeaderHelper("/echoheader") def EchoHeaderCache(self): """This function echoes back the value of a specific request header while allowing caching for 16 hours.""" return self.EchoHeaderHelper("/echoheadercache") def EchoHeaderHelper(self, echo_header): """This function echoes back the value of the request header passed in.""" if not self._ShouldHandleRequest(echo_header): return False query_char = self.path.find('?') if query_char != -1: header_name = self.path[query_char+1:] self.send_response(200) self.send_header('Content-Type', 'text/plain') if echo_header == '/echoheadercache': self.send_header('Cache-control', 'max-age=60000') else: self.send_header('Cache-control', 'no-cache') # insert a vary header to properly indicate that the cachability of this # request is subject to value of the request header being echoed. if len(header_name) > 0: self.send_header('Vary', header_name) self.end_headers() if len(header_name) > 0: self.wfile.write(self.headers.getheader(header_name)) return True def ReadRequestBody(self): """This function reads the body of the current HTTP request, handling both plain and chunked transfer encoded requests.""" if self.headers.getheader('transfer-encoding') != 'chunked': length = int(self.headers.getheader('content-length')) return self.rfile.read(length) # Read the request body as chunks. 
body = "" while True: line = self.rfile.readline() length = int(line, 16) if length == 0: self.rfile.readline() break body += self.rfile.read(length) self.rfile.read(2) return body def EchoHandler(self): """This handler just echoes back the payload of the request, for testing form submission.""" if not self._ShouldHandleRequest("/echo"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(self.ReadRequestBody()) return True def EchoTitleHandler(self): """This handler is like Echo, but sets the page title to the request.""" if not self._ShouldHandleRequest("/echotitle"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() request = self.ReadRequestBody() self.wfile.write('<html><head><title>') self.wfile.write(request) self.wfile.write('</title></head></html>') return True def EchoAllHandler(self): """This handler yields a (more) human-readable page listing information about the request header & contents.""" if not self._ShouldHandleRequest("/echoall"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><style>' 'pre { border: 1px solid black; margin: 5px; padding: 5px }' '</style></head><body>' '<div style="float: right">' '<a href="/echo">back to referring page</a></div>' '<h1>Request Body:</h1><pre>') if self.command == 'POST' or self.command == 'PUT': qs = self.ReadRequestBody() params = cgi.parse_qs(qs, keep_blank_values=1) for param in params: self.wfile.write('%s=%s\n' % (param, params[param][0])) self.wfile.write('</pre>') self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers) self.wfile.write('</body></html>') return True def DownloadHandler(self): """This handler sends a downloadable file with or without reporting the size (6K).""" if self.path.startswith("/download-unknown-size"): send_length = False elif 
self.path.startswith("/download-known-size"): send_length = True else: return False # # The test which uses this functionality is attempting to send # small chunks of data to the client. Use a fairly large buffer # so that we'll fill chrome's IO buffer enough to force it to # actually write the data. # See also the comments in the client-side of this test in # download_uitest.cc # size_chunk1 = 35*1024 size_chunk2 = 10*1024 self.send_response(200) self.send_header('Content-Type', 'application/octet-stream') self.send_header('Cache-Control', 'max-age=0') if send_length: self.send_header('Content-Length', size_chunk1 + size_chunk2) self.end_headers() # First chunk of data: self.wfile.write("*" * size_chunk1) self.wfile.flush() # handle requests until one of them clears this flag. self.server.wait_for_download = True while self.server.wait_for_download: self.server.handle_request() # Second chunk of data: self.wfile.write("*" * size_chunk2) return True def DownloadFinishHandler(self): """This handler just tells the server to finish the current download.""" if not self._ShouldHandleRequest("/download-finish"): return False self.server.wait_for_download = False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=0') self.end_headers() return True def _ReplaceFileData(self, data, query_parameters): """Replaces matching substrings in a file. If the 'replace_text' URL query parameter is present, it is expected to be of the form old_text:new_text, which indicates that any old_text strings in the file are replaced with new_text. Multiple 'replace_text' parameters may be specified. If the parameters are not present, |data| is returned. 
""" query_dict = cgi.parse_qs(query_parameters) replace_text_values = query_dict.get('replace_text', []) for replace_text_value in replace_text_values: replace_text_args = replace_text_value.split(':') if len(replace_text_args) != 2: raise ValueError( 'replace_text must be of form old_text:new_text. Actual value: %s' % replace_text_value) old_text_b64, new_text_b64 = replace_text_args old_text = base64.urlsafe_b64decode(old_text_b64) new_text = base64.urlsafe_b64decode(new_text_b64) data = data.replace(old_text, new_text) return data def ZipFileHandler(self): """This handler sends the contents of the requested file in compressed form. Can pass in a parameter that specifies that the content length be C - the compressed size (OK), U - the uncompressed size (Non-standard, but handled), S - less than compressed (OK because we keep going), M - larger than compressed but less than uncompressed (an error), L - larger than uncompressed (an error) Example: compressedfiles/Picture_1.doc?C """ prefix = "/compressedfiles/" if not self.path.startswith(prefix): return False # Consume a request body if present. if self.command == 'POST' or self.command == 'PUT' : self.ReadRequestBody() _, _, url_path, _, query, _ = urlparse.urlparse(self.path) if not query in ('C', 'U', 'S', 'M', 'L'): return False sub_path = url_path[len(prefix):] entries = sub_path.split('/') file_path = os.path.join(self.server.data_dir, *entries) if os.path.isdir(file_path): file_path = os.path.join(file_path, 'index.html') if not os.path.isfile(file_path): print "File not found " + sub_path + " full path:" + file_path self.send_error(404) return True f = open(file_path, "rb") data = f.read() uncompressed_len = len(data) f.close() # Compress the data. 
data = zlib.compress(data) compressed_len = len(data) content_length = compressed_len if query == 'U': content_length = uncompressed_len elif query == 'S': content_length = compressed_len / 2 elif query == 'M': content_length = (compressed_len + uncompressed_len) / 2 elif query == 'L': content_length = compressed_len + uncompressed_len self.send_response(200) self.send_header('Content-Type', 'application/msword') self.send_header('Content-encoding', 'deflate') self.send_header('Connection', 'close') self.send_header('Content-Length', content_length) self.send_header('ETag', '\'' + file_path + '\'') self.end_headers() self.wfile.write(data) return True def FileHandler(self): """This handler sends the contents of the requested file. Wow, it's like a real webserver!""" prefix = self.server.file_root_url if not self.path.startswith(prefix): return False return self._FileHandlerHelper(prefix) def PostOnlyFileHandler(self): """This handler sends the contents of the requested file on a POST.""" prefix = urlparse.urljoin(self.server.file_root_url, 'post/') if not self.path.startswith(prefix): return False return self._FileHandlerHelper(prefix) def _FileHandlerHelper(self, prefix): request_body = '' if self.command == 'POST' or self.command == 'PUT': # Consume a request body if present. 
request_body = self.ReadRequestBody() _, _, url_path, _, query, _ = urlparse.urlparse(self.path) query_dict = cgi.parse_qs(query) expected_body = query_dict.get('expected_body', []) if expected_body and request_body not in expected_body: self.send_response(404) self.end_headers() self.wfile.write('') return True expected_headers = query_dict.get('expected_headers', []) for expected_header in expected_headers: header_name, expected_value = expected_header.split(':') if self.headers.getheader(header_name) != expected_value: self.send_response(404) self.end_headers() self.wfile.write('') return True sub_path = url_path[len(prefix):] entries = sub_path.split('/') file_path = os.path.join(self.server.data_dir, *entries) if os.path.isdir(file_path): file_path = os.path.join(file_path, 'index.html') if not os.path.isfile(file_path): print "File not found " + sub_path + " full path:" + file_path self.send_error(404) return True f = open(file_path, "rb") data = f.read() f.close() data = self._ReplaceFileData(data, query) old_protocol_version = self.protocol_version # If file.mock-http-headers exists, it contains the headers we # should send. Read them in and parse them. headers_path = file_path + '.mock-http-headers' if os.path.isfile(headers_path): f = open(headers_path, "r") # "HTTP/1.1 200 OK" response = f.readline() http_major, http_minor, status_code = re.findall( 'HTTP/(\d+).(\d+) (\d+)', response)[0] self.protocol_version = "HTTP/%s.%s" % (http_major, http_minor) self.send_response(int(status_code)) for line in f: header_values = re.findall('(\S+):\s*(.*)', line) if len(header_values) > 0: # "name: value" name, value = header_values[0] self.send_header(name, value) f.close() else: # Could be more generic once we support mime-type sniffing, but for # now we need to set it explicitly. range_header = self.headers.get('Range') if range_header and range_header.startswith('bytes='): # Note this doesn't handle all valid byte range_header values (i.e. 
# left open ended ones), just enough for what we needed so far. range_header = range_header[6:].split('-') start = int(range_header[0]) if range_header[1]: end = int(range_header[1]) else: end = len(data) - 1 self.send_response(206) content_range = ('bytes ' + str(start) + '-' + str(end) + '/' + str(len(data))) self.send_header('Content-Range', content_range) data = data[start: end + 1] else: self.send_response(200) self.send_header('Content-Type', self.GetMIMETypeFromName(file_path)) self.send_header('Accept-Ranges', 'bytes') self.send_header('Content-Length', len(data)) self.send_header('ETag', '\'' + file_path + '\'') self.end_headers() if (self.command != 'HEAD'): self.wfile.write(data) self.protocol_version = old_protocol_version return True def SetCookieHandler(self): """This handler just sets a cookie, for testing cookie handling.""" if not self._ShouldHandleRequest("/set-cookie"): return False query_char = self.path.find('?') if query_char != -1: cookie_values = self.path[query_char + 1:].split('&') else: cookie_values = ("",) self.send_response(200) self.send_header('Content-Type', 'text/html') for cookie_value in cookie_values: self.send_header('Set-Cookie', '%s' % cookie_value) self.end_headers() for cookie_value in cookie_values: self.wfile.write('%s' % cookie_value) return True def SetManyCookiesHandler(self): """This handler just sets a given number of cookies, for testing handling of large numbers of cookies.""" if not self._ShouldHandleRequest("/set-many-cookies"): return False query_char = self.path.find('?') if query_char != -1: num_cookies = int(self.path[query_char + 1:]) else: num_cookies = 0 self.send_response(200) self.send_header('', 'text/html') for _i in range(0, num_cookies): self.send_header('Set-Cookie', 'a=') self.end_headers() self.wfile.write('%d cookies were sent' % num_cookies) return True def ExpectAndSetCookieHandler(self): """Expects some cookies to be sent, and if they are, sets more cookies. 
    The expect parameter specifies a required cookie.  May be specified
    multiple times.
    The set parameter specifies a cookie to set if all required cookies are
    preset.  May be specified multiple times.
    The data parameter specifies the response body data to be returned."""
    if not self._ShouldHandleRequest("/expect-and-set-cookie"):
      return False

    _, _, _, _, query, _ = urlparse.urlparse(self.path)
    query_dict = cgi.parse_qs(query)
    cookies = set()
    if 'Cookie' in self.headers:
      cookie_header = self.headers.getheader('Cookie')
      cookies.update([s.strip() for s in cookie_header.split(';')])
    got_all_expected_cookies = True
    for expected_cookie in query_dict.get('expect', []):
      if expected_cookie not in cookies:
        got_all_expected_cookies = False
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    # Only set the requested cookies when every expected cookie was present.
    if got_all_expected_cookies:
      for cookie_value in query_dict.get('set', []):
        self.send_header('Set-Cookie', '%s' % cookie_value)
    self.end_headers()
    for data_value in query_dict.get('data', []):
      self.wfile.write(data_value)
    return True

  def SetHeaderHandler(self):
    """This handler sets a response header. Parameters are in the
    key%3A%20value&key2%3A%20value2 format."""
    if not self._ShouldHandleRequest("/set-header"):
      return False

    query_char = self.path.find('?')
    if query_char != -1:
      headers_values = self.path[query_char + 1:].split('&')
    else:
      headers_values = ("",)
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    for header_value in headers_values:
      # Each parameter is a URL-escaped "key: value" pair.
      header_value = urllib.unquote(header_value)
      (key, value) = header_value.split(': ', 1)
      self.send_header(key, value)
    self.end_headers()
    for header_value in headers_values:
      self.wfile.write('%s' % header_value)
    return True

  def AuthBasicHandler(self):
    """This handler tests 'Basic' authentication.  It just sends a page with
    title 'user/pass' if you succeed."""
    if not self._ShouldHandleRequest("/auth-basic"):
      return False

    username = userpass = password = b64str = ""
    expected_password = 'secret'
    realm = 'testrealm'
    set_cookie_if_challenged = False

    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_params = cgi.parse_qs(query, True)
    if 'set-cookie-if-challenged' in query_params:
      set_cookie_if_challenged = True
    if 'password' in query_params:
      expected_password = query_params['password'][0]
    if 'realm' in query_params:
      realm = query_params['realm'][0]

    auth = self.headers.getheader('authorization')
    try:
      if not auth:
        raise Exception('no auth')
      b64str = re.findall(r'Basic (\S+)', auth)[0]
      userpass = base64.b64decode(b64str)
      username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
      if password != expected_password:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.  Challenge the client; the denial page echoes
      # everything we parsed to ease debugging failing tests.
      self.send_response(401)
      self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
      self.send_header('Content-Type', 'text/html')
      if set_cookie_if_challenged:
        self.send_header('Set-Cookie', 'got_challenged=true')
      self.end_headers()
      self.wfile.write('<html><head>')
      self.wfile.write('<title>Denied: %s</title>' % e)
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('b64str=%s<p>' % b64str)
      self.wfile.write('username: %s<p>' % username)
      self.wfile.write('userpass: %s<p>' % userpass)
      self.wfile.write('password: %s<p>' % password)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')
      return True

    # Authentication successful.  (Return a cachable response to allow for
    # testing cached pages that require authentication.)
    old_protocol_version = self.protocol_version
    self.protocol_version = "HTTP/1.1"

    if_none_match = self.headers.getheader('if-none-match')
    if if_none_match == "abc":
      # Matches the Etag we serve below; allow a cache revalidation.
      self.send_response(304)
      self.end_headers()
    elif url_path.endswith(".gif"):
      # Using chrome/test/data/google/logo.gif as the test image
      test_image_path = ['google', 'logo.gif']
      gif_path = os.path.join(self.server.data_dir, *test_image_path)
      if not os.path.isfile(gif_path):
        self.send_error(404)
        self.protocol_version = old_protocol_version
        return True

      f = open(gif_path, "rb")
      data = f.read()
      f.close()

      self.send_response(200)
      self.send_header('Content-Type', 'image/gif')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write(data)
    else:
      self.send_response(200)
      self.send_header('Content-Type', 'text/html')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write('<html><head>')
      self.wfile.write('<title>%s/%s</title>' % (username, password))
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')

    self.protocol_version = old_protocol_version
    return True

  def GetNonce(self, force_reset=False):
    """Returns a nonce that's stable per request path for the server's lifetime.

    This is a fake implementation.  A real implementation would only use a
    given nonce a single time (hence the name n-once).  However, for the
    purposes of unittesting, we don't care about the security of the nonce.

    Args:
      force_reset: Iff set, the nonce will be changed.  Useful for testing
          the "stale" response.
    """
    if force_reset or not self.server.nonce_time:
      self.server.nonce_time = time.time()
    return hashlib.md5('privatekey%s%d' %
                       (self.path, self.server.nonce_time)).hexdigest()

  def AuthDigestHandler(self):
    """This handler tests 'Digest' authentication.

    It just sends a page with title 'user/pass' if you succeed.

    A stale response is sent iff "stale" is present in the request path.
    """
    if not self._ShouldHandleRequest("/auth-digest"):
      return False

    stale = 'stale' in self.path
    nonce = self.GetNonce(force_reset=stale)
    opaque = hashlib.md5('opaque').hexdigest()
    password = 'secret'
    realm = 'testrealm'

    auth = self.headers.getheader('authorization')
    pairs = {}
    try:
      if not auth:
        raise Exception('no auth')
      if not auth.startswith('Digest'):
        raise Exception('not digest')
      # Pull out all the name="value" pairs as a dictionary.
      pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))

      # Make sure it's all valid.
      if pairs['nonce'] != nonce:
        raise Exception('wrong nonce')
      if pairs['opaque'] != opaque:
        raise Exception('wrong opaque')

      # Check the 'response' value and make sure it matches our magic hash.
      # See http://www.ietf.org/rfc/rfc2617.txt
      hash_a1 = hashlib.md5(
          ':'.join([pairs['username'], realm, password])).hexdigest()
      hash_a2 = hashlib.md5(':'.join([self.command, pairs['uri']])).hexdigest()
      if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
        response = hashlib.md5(':'.join([hash_a1, nonce, pairs['nc'],
                                         pairs['cnonce'], pairs['qop'],
                                         hash_a2])).hexdigest()
      else:
        response = hashlib.md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()

      if pairs['response'] != response:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.
self.send_response(401) hdr = ('Digest ' 'realm="%s", ' 'domain="/", ' 'qop="auth", ' 'algorithm=MD5, ' 'nonce="%s", ' 'opaque="%s"') % (realm, nonce, opaque) if stale: hdr += ', stale="TRUE"' self.send_header('WWW-Authenticate', hdr) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('<title>Denied: %s</title>' % e) self.wfile.write('</head><body>') self.wfile.write('auth=%s<p>' % auth) self.wfile.write('pairs=%s<p>' % pairs) self.wfile.write('You sent:<br>%s<p>' % self.headers) self.wfile.write('We are replying:<br>%s<p>' % hdr) self.wfile.write('</body></html>') return True # Authentication successful. self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password)) self.wfile.write('</head><body>') self.wfile.write('auth=%s<p>' % auth) self.wfile.write('pairs=%s<p>' % pairs) self.wfile.write('</body></html>') return True def SlowServerHandler(self): """Wait for the user suggested time before responding. The syntax is /slow?0.5 to wait for half a second.""" if not self._ShouldHandleRequest("/slow"): return False query_char = self.path.find('?') wait_sec = 1.0 if query_char >= 0: try: wait_sec = int(self.path[query_char + 1:]) except ValueError: pass time.sleep(wait_sec) self.send_response(200) self.send_header('Content-Type', 'text/plain') self.end_headers() self.wfile.write("waited %d seconds" % wait_sec) return True def ChunkedServerHandler(self): """Send chunked response. 
Allows to specify chunks parameters: - waitBeforeHeaders - ms to wait before sending headers - waitBetweenChunks - ms to wait between chunks - chunkSize - size of each chunk in bytes - chunksNumber - number of chunks Example: /chunked?waitBeforeHeaders=1000&chunkSize=5&chunksNumber=5 waits one second, then sends headers and five chunks five bytes each.""" if not self._ShouldHandleRequest("/chunked"): return False query_char = self.path.find('?') chunkedSettings = {'waitBeforeHeaders' : 0, 'waitBetweenChunks' : 0, 'chunkSize' : 5, 'chunksNumber' : 5} if query_char >= 0: params = self.path[query_char + 1:].split('&') for param in params: keyValue = param.split('=') if len(keyValue) == 2: try: chunkedSettings[keyValue[0]] = int(keyValue[1]) except ValueError: pass time.sleep(0.001 * chunkedSettings['waitBeforeHeaders']) self.protocol_version = 'HTTP/1.1' # Needed for chunked encoding self.send_response(200) self.send_header('Content-Type', 'text/plain') self.send_header('Connection', 'close') self.send_header('Transfer-Encoding', 'chunked') self.end_headers() # Chunked encoding: sending all chunks, then final zero-length chunk and # then final CRLF. for i in range(0, chunkedSettings['chunksNumber']): if i > 0: time.sleep(0.001 * chunkedSettings['waitBetweenChunks']) self.sendChunkHelp('*' * chunkedSettings['chunkSize']) self.wfile.flush() # Keep in mind that we start flushing only after 1kb. self.sendChunkHelp('') return True def ContentTypeHandler(self): """Returns a string of html with the given content type. 
    E.g., /contenttype?text/css returns an html file with the Content-Type
    header set to text/css."""
    if not self._ShouldHandleRequest("/contenttype"):
      return False
    query_char = self.path.find('?')
    content_type = self.path[query_char + 1:].strip()
    if not content_type:
      content_type = 'text/html'
    self.send_response(200)
    self.send_header('Content-Type', content_type)
    self.end_headers()
    self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n")
    return True

  def NoContentHandler(self):
    """Returns a 204 No Content response."""
    if not self._ShouldHandleRequest("/nocontent"):
      return False
    self.send_response(204)
    self.end_headers()
    return True

  def ServerRedirectHandler(self):
    """Sends a server redirect to the given URL.  The syntax is
    '/server-redirect?http://foo.bar/asdf' to redirect to
    'http://foo.bar/asdf'"""
    test_name = "/server-redirect"
    if not self._ShouldHandleRequest(test_name):
      return False

    query_char = self.path.find('?')
    if query_char < 0 or len(self.path) <= query_char + 1:
      # No destination given; explain the expected syntax instead.
      self.sendRedirectHelp(test_name)
      return True
    dest = self.path[query_char + 1:]

    self.send_response(301)  # moved permanently
    self.send_header('Location', dest)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
    return True

  def ClientRedirectHandler(self):
    """Sends a client redirect to the given URL.  The syntax is
    '/client-redirect?http://foo.bar/asdf' to redirect to
    'http://foo.bar/asdf'"""
    test_name = "/client-redirect"
    if not self._ShouldHandleRequest(test_name):
      return False

    query_char = self.path.find('?')
    if query_char < 0 or len(self.path) <= query_char + 1:
      # No destination given; explain the expected syntax instead.
      self.sendRedirectHelp(test_name)
      return True
    dest = self.path[query_char + 1:]

    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    # Redirect via a meta refresh tag rather than an HTTP status code.
    self.wfile.write('<html><head>')
    self.wfile.write('<meta http-equiv="refresh" content="0;url=%s">' % dest)
    self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
    return True

  def MultipartHandler(self):
    """Send a multipart response (10 text/html pages)."""
    test_name = '/multipart'
    if not self._ShouldHandleRequest(test_name):
      return False

    num_frames = 10
    bound = '12345'
    self.send_response(200)
    self.send_header('Content-Type',
                     'multipart/x-mixed-replace;boundary=' + bound)
    self.end_headers()

    for i in xrange(num_frames):
      self.wfile.write('--' + bound + '\r\n')
      self.wfile.write('Content-Type: text/html\r\n\r\n')
      self.wfile.write('<title>page ' + str(i) + '</title>')
      self.wfile.write('page ' + str(i))

    self.wfile.write('--' + bound + '--')
    return True

  def GetSSLSessionCacheHandler(self):
    """Send a reply containing a log of the session cache operations."""
    if not self._ShouldHandleRequest('/ssl-session-cache'):
      return False

    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    try:
      for (action, sessionID) in self.server.session_cache.log:
        self.wfile.write('%s\t%s\n' % (action, sessionID.encode('hex')))
    except AttributeError:
      # session_cache only exists when --https-record-resume was given.
      self.wfile.write('Pass --https-record-resume in order to use' +
                       ' this request')
    return True

  def SSLManySmallRecords(self):
    """Sends a reply consisting of a variety of small writes.
    These will be translated into a series of small SSL records when used
    over an HTTPS server."""
    if not self._ShouldHandleRequest('/ssl-many-small-records'):
      return False

    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()

    # Write ~26K of data, in 1350 byte chunks
    for i in xrange(20):
      self.wfile.write('*' * 1350)
      # Flush per chunk so each write becomes its own SSL record.
      self.wfile.flush()
    return True

  def GetChannelID(self):
    """Send a reply containing the hashed ChannelID that the client provided."""
    if not self._ShouldHandleRequest('/channel-id'):
      return False

    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    channel_id = self.server.tlsConnection.channel_id.tostring()
    self.wfile.write(hashlib.sha256(channel_id).digest().encode('base64'))
    return True

  def CloseSocketHandler(self):
    """Closes the socket without sending anything."""
    if not self._ShouldHandleRequest('/close-socket'):
      return False

    self.wfile.close()
    return True

  def RangeResetHandler(self):
    """Send data broken up by connection resets every N (default 4K) bytes.
    Support range requests.  If the data requested doesn't straddle a reset
    boundary, it will all be sent.  Used for testing resuming downloads."""

    def DataForRange(start, end):
      """Data to be provided for a particular range of bytes."""
      # Offset and scale to avoid too obvious (and hence potentially
      # collidable) data.
      return ''.join([chr(y % 256)
                      for y in range(start * 2 + 15, end * 2 + 15, 2)])

    if not self._ShouldHandleRequest('/rangereset'):
      return False

    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)

    # Defaults
    size = 8000
    # Note that the rst is sent just before sending the rst_boundary byte.
    rst_boundary = 4000
    respond_to_range = True
    hold_for_signal = False
    rst_limit = -1
    token = 'DEFAULT'
    fail_precondition = 0
    send_verifiers = True

    # Parse the query
    qdict = urlparse.parse_qs(query, True)
    if 'size' in qdict:
      size = int(qdict['size'][0])
    if 'rst_boundary' in qdict:
      rst_boundary = int(qdict['rst_boundary'][0])
    if 'token' in qdict:
      # Identifying token for stateful tests.
      token = qdict['token'][0]
    if 'rst_limit' in qdict:
      # Max number of rsts for a given token.
      rst_limit = int(qdict['rst_limit'][0])
    if 'bounce_range' in qdict:
      respond_to_range = False
    if 'hold' in qdict:
      # Note that hold_for_signal will not work with null range requests;
      # see TODO below.
      hold_for_signal = True
    if 'no_verifiers' in qdict:
      send_verifiers = False
    if 'fail_precondition' in qdict:
      fail_precondition = int(qdict['fail_precondition'][0])

    # Record already set information, or set it.  These class-level dicts
    # carry per-token state across successive requests.
    rst_limit = TestPageHandler.rst_limits.setdefault(token, rst_limit)
    if rst_limit != 0:
      TestPageHandler.rst_limits[token] -= 1
    fail_precondition = TestPageHandler.fail_precondition.setdefault(
        token, fail_precondition)
    if fail_precondition != 0:
      TestPageHandler.fail_precondition[token] -= 1

    first_byte = 0
    last_byte = size - 1

    # Does that define what we want to return, or do we need to apply
    # a range?
    range_response = False
    range_header = self.headers.getheader('range')
    if range_header and respond_to_range:
      mo = re.match("bytes=(\d*)-(\d*)", range_header)
      if mo.group(1):
        first_byte = int(mo.group(1))
      if mo.group(2):
        last_byte = int(mo.group(2))
        if last_byte > size - 1:
          last_byte = size - 1
      range_response = True

    if last_byte < first_byte:
      return False

    if (fail_precondition and
        (self.headers.getheader('If-Modified-Since') or
         self.headers.getheader('If-Match'))):
      # Simulate a failed conditional request.
      self.send_response(412)
      self.end_headers()
      return True

    if range_response:
      self.send_response(206)
      self.send_header('Content-Range',
                       'bytes %d-%d/%d' % (first_byte, last_byte, size))
    else:
      self.send_response(200)
    self.send_header('Content-Type', 'application/octet-stream')
    self.send_header('Content-Length', last_byte - first_byte + 1)
    if send_verifiers:
      self.send_header('Etag', '"XYZZY"')
      self.send_header('Last-Modified', 'Tue, 19 Feb 2013 14:32 EST')
    self.end_headers()

    if hold_for_signal:
      # TODO(rdsmith/phajdan.jr): http://crbug.com/169519: Without writing
      # a single byte, the self.server.handle_request() below hangs
      # without processing new incoming requests.
      self.wfile.write(DataForRange(first_byte, first_byte + 1))
      first_byte = first_byte + 1
      # handle requests until one of them clears this flag.
      self.server.wait_for_download = True
      while self.server.wait_for_download:
        self.server.handle_request()

    possible_rst = ((first_byte / rst_boundary) + 1) * rst_boundary
    if possible_rst >= last_byte or rst_limit == 0:
      # No RST has been requested in this range, so we don't need to
      # do anything fancy; just write the data and let the python
      # infrastructure close the connection.
      self.wfile.write(DataForRange(first_byte, last_byte + 1))
      self.wfile.flush()
      return True

    # We're resetting the connection part way in; go to the RST
    # boundary and then send an RST.
    # Because socket semantics do not guarantee that all the data will be
    # sent when using the linger semantics to hard close a socket,
    # we send the data and then wait for our peer to release us
    # before sending the reset.
    data = DataForRange(first_byte, possible_rst)
    self.wfile.write(data)
    self.wfile.flush()
    self.server.wait_for_download = True
    while self.server.wait_for_download:
      self.server.handle_request()
    l_onoff = 1  # Linger is active.
    l_linger = 0  # Seconds to linger for.
    self.connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                               struct.pack('ii', l_onoff, l_linger))

    # Close all duplicates of the underlying socket to force the RST.
    self.wfile.close()
    self.rfile.close()
    self.connection.close()

    return True

  def DefaultResponseHandler(self):
    """This is the catch-all response handler for requests that aren't handled
    by one of the special handlers above.
    Note that we specify the content-length as without it the https connection
    is not closed properly (and the browser keeps expecting data)."""

    contents = "Default response given for path: " + self.path
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', len(contents))
    self.end_headers()
    if (self.command != 'HEAD'):
      self.wfile.write(contents)
    return True

  def RedirectConnectHandler(self):
    """Sends a redirect to the CONNECT request for www.redirect.com.  This
    response is not specified by the RFC, so the browser should not follow
    the redirect."""

    if (self.path.find("www.redirect.com") < 0):
      return False

    dest = "http://www.destination.com/foo.js"

    self.send_response(302)  # moved temporarily
    self.send_header('Location', dest)
    self.send_header('Connection', 'close')
    self.end_headers()
    return True

  def ServerAuthConnectHandler(self):
    """Sends a 401 to the CONNECT request for www.server-auth.com.
    This response doesn't make sense because the proxy server cannot request
    server authentication."""

    if (self.path.find("www.server-auth.com") < 0):
      return False

    challenge = 'Basic realm="WallyWorld"'

    self.send_response(401)  # unauthorized
    self.send_header('WWW-Authenticate', challenge)
    self.send_header('Connection', 'close')
    self.end_headers()
    return True

  def DefaultConnectResponseHandler(self):
    """This is the catch-all response handler for CONNECT requests that aren't
    handled by one of the special handlers above.  Real Web servers respond
    with 400 to CONNECT requests."""

    contents = "Your client has issued a malformed or illegal request."
    self.send_response(400)  # bad request
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', len(contents))
    self.end_headers()
    self.wfile.write(contents)
    return True

  # called by the redirect handling function when there is no parameter
  def sendRedirectHelp(self, redirect_name):
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><body><h1>Error: no redirect destination</h1>')
    self.wfile.write('Use <pre>%s?http://dest...</pre>' % redirect_name)
    self.wfile.write('</body></html>')

  # called by chunked handling function
  def sendChunkHelp(self, chunk):
    # Each chunk consists of: chunk size (hex), CRLF, chunk body, CRLF
    self.wfile.write('%X\r\n' % len(chunk))
    self.wfile.write(chunk)
    self.wfile.write('\r\n')


class OCSPHandler(testserver_base.BasePageHandler):
  """Serves the pre-computed OCSP response stored on the socket server."""

  def __init__(self, request, client_address, socket_server):
    handlers = [self.OCSPResponse]
    self.ocsp_response = socket_server.ocsp_response
    testserver_base.BasePageHandler.__init__(self, request, client_address,
                                             socket_server, [], handlers, [],
                                             handlers, [])

  def OCSPResponse(self):
    # Reply with the DER-encoded OCSP response for every request.
    self.send_response(200)
    self.send_header('Content-Type', 'application/ocsp-response')
    self.send_header('Content-Length', str(len(self.ocsp_response)))
    self.end_headers()

    self.wfile.write(self.ocsp_response)


class TCPEchoHandler(SocketServer.BaseRequestHandler):
TCPEchoHandler(SocketServer.BaseRequestHandler): """The RequestHandler class for TCP echo server. It is instantiated once per connection to the server, and overrides the handle() method to implement communication to the client. """ def handle(self): """Handles the request from the client and constructs a response.""" data = self.request.recv(65536).strip() # Verify the "echo request" message received from the client. Send back # "echo response" message if "echo request" message is valid. try: return_data = echo_message.GetEchoResponseData(data) if not return_data: return except ValueError: return self.request.send(return_data) class UDPEchoHandler(SocketServer.BaseRequestHandler): """The RequestHandler class for UDP echo server. It is instantiated once per connection to the server, and overrides the handle() method to implement communication to the client. """ def handle(self): """Handles the request from the client and constructs a response.""" data = self.request[0].strip() request_socket = self.request[1] # Verify the "echo request" message received from the client. Send back # "echo response" message if "echo request" message is valid. try: return_data = echo_message.GetEchoResponseData(data) if not return_data: return except ValueError: return request_socket.sendto(return_data, self.client_address) class BasicAuthProxyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """A request handler that behaves as a proxy server which requires basic authentication. Only CONNECT, GET and HEAD is supported for now. 
""" _AUTH_CREDENTIAL = 'Basic Zm9vOmJhcg==' # foo:bar def parse_request(self): """Overrides parse_request to check credential.""" if not BaseHTTPServer.BaseHTTPRequestHandler.parse_request(self): return False auth = self.headers.getheader('Proxy-Authorization') if auth != self._AUTH_CREDENTIAL: self.send_response(407) self.send_header('Proxy-Authenticate', 'Basic realm="MyRealm1"') self.end_headers() return False return True def _start_read_write(self, sock): sock.setblocking(0) self.request.setblocking(0) rlist = [self.request, sock] while True: ready_sockets, _unused, errors = select.select(rlist, [], []) if errors: self.send_response(500) self.end_headers() return for s in ready_sockets: received = s.recv(1024) if len(received) == 0: return if s == self.request: other = sock else: other = self.request other.send(received) def _do_common_method(self): url = urlparse.urlparse(self.path) port = url.port if not port: if url.scheme == 'http': port = 80 elif url.scheme == 'https': port = 443 if not url.hostname or not port: self.send_response(400) self.end_headers() return if len(url.path) == 0: path = '/' else: path = url.path if len(url.query) > 0: path = '%s?%s' % (url.path, url.query) sock = None try: sock = socket.create_connection((url.hostname, port)) sock.send('%s %s %s\r\n' % ( self.command, path, self.protocol_version)) for header in self.headers.headers: header = header.strip() if (header.lower().startswith('connection') or header.lower().startswith('proxy')): continue sock.send('%s\r\n' % header) sock.send('\r\n') self._start_read_write(sock) except Exception: self.send_response(500) self.end_headers() finally: if sock is not None: sock.close() def do_CONNECT(self): try: pos = self.path.rfind(':') host = self.path[:pos] port = int(self.path[pos+1:]) except Exception: self.send_response(400) self.end_headers() try: sock = socket.create_connection((host, port)) self.send_response(200, 'Connection established') self.end_headers() self._start_read_write(sock) 
except Exception: self.send_response(500) self.end_headers() finally: sock.close() def do_GET(self): self._do_common_method() def do_HEAD(self): self._do_common_method() class ServerRunner(testserver_base.TestServerRunner): """TestServerRunner for the net test servers.""" def __init__(self): super(ServerRunner, self).__init__() self.__ocsp_server = None def __make_data_dir(self): if self.options.data_dir: if not os.path.isdir(self.options.data_dir): raise testserver_base.OptionError('specified data dir not found: ' + self.options.data_dir + ' exiting...') my_data_dir = self.options.data_dir else: # Create the default path to our data dir, relative to the exe dir. my_data_dir = os.path.join(BASE_DIR, "..", "..", "..", "..", "test", "data") #TODO(ibrar): Must use Find* funtion defined in google\tools #i.e my_data_dir = FindUpward(my_data_dir, "test", "data") return my_data_dir def create_server(self, server_data): port = self.options.port host = self.options.host if self.options.server_type == SERVER_HTTP: if self.options.https: pem_cert_and_key = None if self.options.cert_and_key_file: if not os.path.isfile(self.options.cert_and_key_file): raise testserver_base.OptionError( 'specified server cert file not found: ' + self.options.cert_and_key_file + ' exiting...') pem_cert_and_key = file(self.options.cert_and_key_file, 'r').read() else: # generate a new certificate and run an OCSP server for it. self.__ocsp_server = OCSPServer((host, 0), OCSPHandler) print ('OCSP server started on %s:%d...' 
% (host, self.__ocsp_server.server_port)) ocsp_der = None ocsp_state = None if self.options.ocsp == 'ok': ocsp_state = minica.OCSP_STATE_GOOD elif self.options.ocsp == 'revoked': ocsp_state = minica.OCSP_STATE_REVOKED elif self.options.ocsp == 'invalid': ocsp_state = minica.OCSP_STATE_INVALID elif self.options.ocsp == 'unauthorized': ocsp_state = minica.OCSP_STATE_UNAUTHORIZED elif self.options.ocsp == 'unknown': ocsp_state = minica.OCSP_STATE_UNKNOWN else: raise testserver_base.OptionError('unknown OCSP status: ' + self.options.ocsp_status) (pem_cert_and_key, ocsp_der) = minica.GenerateCertKeyAndOCSP( subject = "127.0.0.1", ocsp_url = ("http://%s:%d/ocsp" % (host, self.__ocsp_server.server_port)), ocsp_state = ocsp_state, serial = self.options.cert_serial) self.__ocsp_server.ocsp_response = ocsp_der for ca_cert in self.options.ssl_client_ca: if not os.path.isfile(ca_cert): raise testserver_base.OptionError( 'specified trusted client CA file not found: ' + ca_cert + ' exiting...') server = HTTPSServer((host, port), TestPageHandler, pem_cert_and_key, self.options.ssl_client_auth, self.options.ssl_client_ca, self.options.ssl_bulk_cipher, self.options.record_resume, self.options.tls_intolerant) print 'HTTPS server started on %s:%d...' % (host, server.server_port) else: server = HTTPServer((host, port), TestPageHandler) print 'HTTP server started on %s:%d...' % (host, server.server_port) server.data_dir = self.__make_data_dir() server.file_root_url = self.options.file_root_url server_data['port'] = server.server_port elif self.options.server_type == SERVER_WEBSOCKET: # Launch pywebsocket via WebSocketServer. logger = logging.getLogger() logger.addHandler(logging.StreamHandler()) # TODO(toyoshim): Remove following os.chdir. Currently this operation # is required to work correctly. It should be fixed from pywebsocket side. 
os.chdir(self.__make_data_dir()) websocket_options = WebSocketOptions(host, port, '.') if self.options.cert_and_key_file: websocket_options.use_tls = True websocket_options.private_key = self.options.cert_and_key_file websocket_options.certificate = self.options.cert_and_key_file if self.options.ssl_client_auth: websocket_options.tls_client_auth = True if len(self.options.ssl_client_ca) != 1: raise testserver_base.OptionError( 'one trusted client CA file should be specified') if not os.path.isfile(self.options.ssl_client_ca[0]): raise testserver_base.OptionError( 'specified trusted client CA file not found: ' + self.options.ssl_client_ca[0] + ' exiting...') websocket_options.tls_client_ca = self.options.ssl_client_ca[0] server = WebSocketServer(websocket_options) print 'WebSocket server started on %s:%d...' % (host, server.server_port) server_data['port'] = server.server_port elif self.options.server_type == SERVER_TCP_ECHO: # Used for generating the key (randomly) that encodes the "echo request" # message. random.seed() server = TCPEchoServer((host, port), TCPEchoHandler) print 'Echo TCP server started on port %d...' % server.server_port server_data['port'] = server.server_port elif self.options.server_type == SERVER_UDP_ECHO: # Used for generating the key (randomly) that encodes the "echo request" # message. random.seed() server = UDPEchoServer((host, port), UDPEchoHandler) print 'Echo UDP server started on port %d...' % server.server_port server_data['port'] = server.server_port elif self.options.server_type == SERVER_BASIC_AUTH_PROXY: server = HTTPServer((host, port), BasicAuthProxyRequestHandler) print 'BasicAuthProxy server started on port %d...' 
% server.server_port server_data['port'] = server.server_port elif self.options.server_type == SERVER_FTP: my_data_dir = self.__make_data_dir() # Instantiate a dummy authorizer for managing 'virtual' users authorizer = pyftpdlib.ftpserver.DummyAuthorizer() # Define a new user having full r/w permissions and a read-only # anonymous user authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw') authorizer.add_anonymous(my_data_dir) # Instantiate FTP handler class ftp_handler = pyftpdlib.ftpserver.FTPHandler ftp_handler.authorizer = authorizer # Define a customized banner (string returned when client connects) ftp_handler.banner = ("pyftpdlib %s based ftpd ready." % pyftpdlib.ftpserver.__ver__) # Instantiate FTP server class and listen to address:port server = pyftpdlib.ftpserver.FTPServer((host, port), ftp_handler) server_data['port'] = server.socket.getsockname()[1] print 'FTP server started on port %d...' % server_data['port'] else: raise testserver_base.OptionError('unknown server type' + self.options.server_type) return server def run_server(self): if self.__ocsp_server: self.__ocsp_server.serve_forever_on_thread() testserver_base.TestServerRunner.run_server(self) if self.__ocsp_server: self.__ocsp_server.stop_serving() def add_options(self): testserver_base.TestServerRunner.add_options(self) self.option_parser.add_option('-f', '--ftp', action='store_const', const=SERVER_FTP, default=SERVER_HTTP, dest='server_type', help='start up an FTP server.') self.option_parser.add_option('--tcp-echo', action='store_const', const=SERVER_TCP_ECHO, default=SERVER_HTTP, dest='server_type', help='start up a tcp echo server.') self.option_parser.add_option('--udp-echo', action='store_const', const=SERVER_UDP_ECHO, default=SERVER_HTTP, dest='server_type', help='start up a udp echo server.') self.option_parser.add_option('--basic-auth-proxy', action='store_const', const=SERVER_BASIC_AUTH_PROXY, default=SERVER_HTTP, dest='server_type', help='start up a proxy server which 
requires ' 'basic authentication.') self.option_parser.add_option('--websocket', action='store_const', const=SERVER_WEBSOCKET, default=SERVER_HTTP, dest='server_type', help='start up a WebSocket server.') self.option_parser.add_option('--https', action='store_true', dest='https', help='Specify that https ' 'should be used.') self.option_parser.add_option('--cert-and-key-file', dest='cert_and_key_file', help='specify the ' 'path to the file containing the certificate ' 'and private key for the server in PEM ' 'format') self.option_parser.add_option('--ocsp', dest='ocsp', default='ok', help='The type of OCSP response generated ' 'for the automatically generated ' 'certificate. One of [ok,revoked,invalid]') self.option_parser.add_option('--cert-serial', dest='cert_serial', default=0, type=int, help='If non-zero then the generated ' 'certificate will have this serial number') self.option_parser.add_option('--tls-intolerant', dest='tls_intolerant', default='0', type='int', help='If nonzero, certain TLS connections ' 'will be aborted in order to test version ' 'fallback. 1 means all TLS versions will be ' 'aborted. 2 means TLS 1.1 or higher will be ' 'aborted. 3 means TLS 1.2 or higher will be ' 'aborted.') self.option_parser.add_option('--https-record-resume', dest='record_resume', const=True, default=False, action='store_const', help='Record resumption cache events rather ' 'than resuming as normal. Allows the use of ' 'the /ssl-session-cache request') self.option_parser.add_option('--ssl-client-auth', action='store_true', help='Require SSL client auth on every ' 'connection.') self.option_parser.add_option('--ssl-client-ca', action='append', default=[], help='Specify that the client ' 'certificate request should include the CA ' 'named in the subject of the DER-encoded ' 'certificate contained in the specified ' 'file. 
This option may appear multiple ' 'times, indicating multiple CA names should ' 'be sent in the request.') self.option_parser.add_option('--ssl-bulk-cipher', action='append', help='Specify the bulk encryption ' 'algorithm(s) that will be accepted by the ' 'SSL server. Valid values are "aes256", ' '"aes128", "3des", "rc4". If omitted, all ' 'algorithms will be used. This option may ' 'appear multiple times, indicating ' 'multiple algorithms should be enabled.'); self.option_parser.add_option('--file-root-url', default='/files/', help='Specify a root URL for files served.') if __name__ == '__main__': sys.exit(ServerRunner().main())
bsd-3-clause
fbossy/SickRage
lib/guessit/textutils.py
33
11207
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2013 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import, division, print_function, unicode_literals from guessit import s from guessit.patterns import sep import functools import unicodedata import re # string-related functions def normalize_unicode(s): return unicodedata.normalize('NFC', s) def strip_brackets(s): if not s: return s if ((s[0] == '[' and s[-1] == ']') or (s[0] == '(' and s[-1] == ')') or (s[0] == '{' and s[-1] == '}')): return s[1:-1] return s _dotted_rexp = re.compile(r'(?:\W|^)(([A-Za-z]\.){2,}[A-Za-z]\.?)') def clean_default(st): for c in sep: # do not remove certain chars if c in ['-', ',']: continue if c == '.': # we should not remove the dots for acronyms and such dotted = _dotted_rexp.search(st) if dotted: s = dotted.group(1) exclude_begin, exclude_end = dotted.span(1) st = (st[:exclude_begin].replace(c, ' ') + st[exclude_begin:exclude_end] + st[exclude_end:].replace(c, ' ')) continue st = st.replace(c, ' ') parts = st.split() result = ' '.join(p for p in parts if p != '') # now also remove dashes on the outer part of the string while result and result[0] in '-': result = result[1:] while result and result[-1] in '-': result = result[:-1] return result _words_rexp 
= re.compile('\w+', re.UNICODE) def find_words(s): return _words_rexp.findall(s.replace('_', ' ')) def iter_words(s): return _words_rexp.finditer(s.replace('_', ' ')) def reorder_title(title, articles=('the',), separators=(',', ', ')): ltitle = title.lower() for article in articles: for separator in separators: suffix = separator + article if ltitle[-len(suffix):] == suffix: return title[-len(suffix) + len(separator):] + ' ' + title[:-len(suffix)] return title def str_replace(string, pos, c): return string[:pos] + c + string[pos + 1:] def str_fill(string, region, c): start, end = region return string[:start] + c * (end - start) + string[end:] def levenshtein(a, b): if not a: return len(b) if not b: return len(a) m = len(a) n = len(b) d = [] for i in range(m + 1): d.append([0] * (n + 1)) for i in range(m + 1): d[i][0] = i for j in range(n + 1): d[0][j] = j for i in range(1, m + 1): for j in range(1, n + 1): if a[i - 1] == b[j - 1]: cost = 0 else: cost = 1 d[i][j] = min(d[i - 1][j] + 1, # deletion d[i][j - 1] + 1, # insertion d[i - 1][j - 1] + cost # substitution ) return d[m][n] # group-related functions def find_first_level_groups_span(string, enclosing): """Return a list of pairs (start, end) for the groups delimited by the given enclosing characters. This does not return nested groups, ie: '(ab(c)(d))' will return a single group containing the whole string. 
>>> find_first_level_groups_span('abcd', '()') [] >>> find_first_level_groups_span('abc(de)fgh', '()') [(3, 7)] >>> find_first_level_groups_span('(ab(c)(d))', '()') [(0, 10)] >>> find_first_level_groups_span('ab[c]de[f]gh(i)', '[]') [(2, 5), (7, 10)] """ opening, closing = enclosing depth = [] # depth is a stack of indices where we opened a group result = [] for i, c, in enumerate(string): if c == opening: depth.append(i) elif c == closing: try: start = depth.pop() end = i if not depth: # we emptied our stack, so we have a 1st level group result.append((start, end + 1)) except IndexError: # we closed a group which was not opened before pass return result def split_on_groups(string, groups): """Split the given string using the different known groups for boundaries. >>> s(split_on_groups('0123456789', [ (2, 4) ])) ['01', '23', '456789'] >>> s(split_on_groups('0123456789', [ (2, 4), (4, 6) ])) ['01', '23', '45', '6789'] >>> s(split_on_groups('0123456789', [ (5, 7), (2, 4) ])) ['01', '23', '4', '56', '789'] """ if not groups: return [string] boundaries = sorted(set(functools.reduce(lambda l, x: l + list(x), groups, []))) if boundaries[0] != 0: boundaries.insert(0, 0) if boundaries[-1] != len(string): boundaries.append(len(string)) groups = [string[start:end] for start, end in zip(boundaries[:-1], boundaries[1:])] return [g for g in groups if g] # return only non-empty groups def find_first_level_groups(string, enclosing, blank_sep=None): """Return a list of groups that could be split because of explicit grouping. The groups are delimited by the given enclosing characters. You can also specify if you want to blank the separator chars in the returned list of groups by specifying a character for it. None means it won't be replaced. This does not return nested groups, ie: '(ab(c)(d))' will return a single group containing the whole string. 
>>> s(find_first_level_groups('', '()')) [''] >>> s(find_first_level_groups('abcd', '()')) ['abcd'] >>> s(find_first_level_groups('abc(de)fgh', '()')) ['abc', '(de)', 'fgh'] >>> s(find_first_level_groups('(ab(c)(d))', '()', blank_sep = '_')) ['_ab(c)(d)_'] >>> s(find_first_level_groups('ab[c]de[f]gh(i)', '[]')) ['ab', '[c]', 'de', '[f]', 'gh(i)'] >>> s(find_first_level_groups('()[]()', '()', blank_sep = '-')) ['--', '[]', '--'] """ groups = find_first_level_groups_span(string, enclosing) if blank_sep: for start, end in groups: string = str_replace(string, start, blank_sep) string = str_replace(string, end - 1, blank_sep) return split_on_groups(string, groups) _camel_word2_set = {'is', 'to'} _camel_word3_set = {'the'} def _camel_split_and_lower(string, i): """Retrieves a tuple (need_split, need_lower) need_split is True if this char is a first letter in a camelCasedString. need_lower is True if this char should be lowercased. """ def islower(c): return c.isalpha() and not c.isupper() previous_char2 = string[i - 2] if i > 1 else None previous_char = string[i - 1] if i > 0 else None char = string[i] next_char = string[i + 1] if i + 1 < len(string) else None next_char2 = string[i + 2] if i + 2 < len(string) else None char_upper = char.isupper() char_lower = islower(char) # previous_char2_lower = islower(previous_char2) if previous_char2 else False previous_char2_upper = previous_char2.isupper() if previous_char2 else False previous_char_lower = islower(previous_char) if previous_char else False previous_char_upper = previous_char.isupper() if previous_char else False next_char_upper = next_char.isupper() if next_char else False next_char_lower = islower(next_char) if next_char else False next_char2_upper = next_char2.isupper() if next_char2 else False # next_char2_lower = islower(next_char2) if next_char2 else False mixedcase_word = (previous_char_upper and char_lower and next_char_upper) or \ (previous_char_lower and char_upper and next_char_lower and 
next_char2_upper) or \ (previous_char2_upper and previous_char_lower and char_upper) if mixedcase_word: word2 = (char + next_char).lower() if next_char else None word3 = (char + next_char + next_char2).lower() if next_char and next_char2 else None word2b = (previous_char2 + previous_char).lower() if previous_char2 and previous_char else None if word2 in _camel_word2_set or word2b in _camel_word2_set or word3 in _camel_word3_set: mixedcase_word = False uppercase_word = previous_char_upper and char_upper and next_char_upper or (char_upper and next_char_upper and next_char2_upper) need_split = char_upper and previous_char_lower and not mixedcase_word if not need_split: previous_char_upper = string[i - 1].isupper() if i > 0 else False next_char_lower = (string[i + 1].isalpha() and not string[i + 1].isupper()) if i + 1 < len(string) else False need_split = char_upper and previous_char_upper and next_char_lower uppercase_word = previous_char_upper and not next_char_lower need_lower = not uppercase_word and not mixedcase_word and need_split return need_split, need_lower def is_camel(string): """ >>> is_camel('dogEATDog') True >>> is_camel('DeathToCamelCase') True >>> is_camel('death_to_camel_case') False >>> is_camel('TheBest') True >>> is_camel('The Best') False """ for i in range(0, len(string)): need_split, _ = _camel_split_and_lower(string, i) if need_split: return True return False def from_camel(string): """ >>> from_camel('dogEATDog') == 'dog EAT dog' True >>> from_camel('DeathToCamelCase') == 'Death to camel case' True >>> from_camel('TheBest') == 'The best' True >>> from_camel('MiXedCaSe is not camelCase') == 'MiXedCaSe is not camel case' True """ if not string: return string pieces = [] for i in range(0, len(string)): char = string[i] need_split, need_lower = _camel_split_and_lower(string, i) if need_split: pieces.append(' ') if need_lower: pieces.append(char.lower()) else: pieces.append(char) return ''.join(pieces) def common_words(s1, s2): common = [] words1 = 
set(s1.split()) for word in s2.split(): # strip some chars here, e.g. as in [1] if word in words1: common.append(word) return common
gpl-3.0
peiyuwang/pants
tests/python/pants_test/jvm/test_safeargs.py
11
1132
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import unittest from pants.backend.jvm import argfile class SafeArgTest(unittest.TestCase): def test_safe_args_over_max_arg(self): # len(args) > max_args, so it should a file should be yielded args = ['1', '2', '3', '4'] with argfile.safe_args(args, options=None, max_args=2, quoter=lambda x: x, delimiter='') as safe_args: self.assertEqual(1, len(safe_args)) arg_file = safe_args[0] self.assertTrue(os.path.isfile(arg_file)) with open(arg_file) as f: self.assertEqual(['1234'], f.readlines()) def test_safe_args_below_max_arg(self): # len(args) < max_args, so args should pass through. args = ['1', '2', '3', '4'] with argfile.safe_args(args, options=None, max_args=10, quoter=lambda x: x, delimiter='') as safe_args: self.assertTrue(args, safe_args)
apache-2.0