repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
Dave667/service
script.module.requests/lib/requests/packages/charade/escprober.py
206
3273
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
                    ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord


class EscCharSetProber(CharSetProber):
    """Prober for escape-sequence based charsets (HZ and ISO-2022-*).

    Runs one coding state machine per candidate encoding in parallel over
    the input; the first machine that reaches the "its me" state decides
    the detected charset.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # One state machine per candidate escape-based encoding.
        self._mCodingSM = [
            CodingStateMachine(HZSMModel),
            CodingStateMachine(ISO2022CNSMModel),
            CodingStateMachine(ISO2022JPSMModel),
            CodingStateMachine(ISO2022KRSMModel),
        ]
        self.reset()

    def reset(self):
        """Reactivate every state machine and clear any previous result."""
        CharSetProber.reset(self)
        for sm in self._mCodingSM:
            if sm:
                sm.active = True
                sm.reset()
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        # None until a state machine reached eItsMe in feed().
        return self._mDetectedCharset

    def get_confidence(self):
        # Escape-sequence matches are essentially unambiguous, so report
        # near-certainty once a charset has been detected.
        return 0.99 if self._mDetectedCharset else 0.00

    def feed(self, aBuf):
        """Feed a chunk of bytes to all active state machines.

        Returns the prober state after consuming the chunk (or as soon as
        a definitive answer — eFoundIt / eNotMe — is reached).
        """
        for c in aBuf:
            # PY3K: aBuf is a byte array, so c is an int, not a byte
            code_point = wrap_ord(c)
            for sm in self._mCodingSM:
                if not sm or not sm.active:
                    continue
                coding_state = sm.next_state(code_point)
                if coding_state == constants.eError:
                    # This machine can no longer match; retire it.
                    sm.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        # Every candidate ruled out: definitely not us.
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif coding_state == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = sm.get_coding_state_machine()  # nopep8
                    return self.get_state()

        return self.get_state()
gpl-2.0
maciejkula/kafka-python
kafka/producer.py
10
9861
from __future__ import absolute_import import logging import time from Queue import Empty from collections import defaultdict from itertools import cycle from multiprocessing import Queue, Process from kafka.common import ProduceRequest, TopicAndPartition from kafka.partitioner import HashedPartitioner from kafka.protocol import create_message log = logging.getLogger("kafka") BATCH_SEND_DEFAULT_INTERVAL = 20 BATCH_SEND_MSG_COUNT = 20 STOP_ASYNC_PRODUCER = -1 def _send_upstream(queue, client, batch_time, batch_size, req_acks, ack_timeout): """ Listen on the queue for a specified number of messages or till a specified timeout and send them upstream to the brokers in one request NOTE: Ideally, this should have been a method inside the Producer class. However, multiprocessing module has issues in windows. The functionality breaks unless this function is kept outside of a class """ stop = False client.reinit() while not stop: timeout = batch_time count = batch_size send_at = time.time() + timeout msgset = defaultdict(list) # Keep fetching till we gather enough messages or a # timeout is reached while count > 0 and timeout >= 0: try: topic_partition, msg = queue.get(timeout=timeout) except Empty: break # Check if the controller has requested us to stop if topic_partition == STOP_ASYNC_PRODUCER: stop = True break # Adjust the timeout to match the remaining period count -= 1 timeout = send_at - time.time() msgset[topic_partition].append(msg) # Send collected requests upstream reqs = [] for topic_partition, messages in msgset.items(): req = ProduceRequest(topic_partition.topic, topic_partition.partition, messages) reqs.append(req) try: client.send_produce_request(reqs, acks=req_acks, timeout=ack_timeout) except Exception: log.exception("Unable to send message") class Producer(object): """ Base class to be used by producers Params: client - The Kafka client instance to use async - If set to true, the messages are sent asynchronously via another thread (process). 
We will not wait for a response to these req_acks - A value indicating the acknowledgements that the server must receive before responding to the request ack_timeout - Value (in milliseconds) indicating a timeout for waiting for an acknowledgement batch_send - If True, messages are send in batches batch_send_every_n - If set, messages are send in batches of this size batch_send_every_t - If set, messages are send after this timeout """ ACK_NOT_REQUIRED = 0 # No ack is required ACK_AFTER_LOCAL_WRITE = 1 # Send response after it is written to log ACK_AFTER_CLUSTER_COMMIT = -1 # Send response after data is committed DEFAULT_ACK_TIMEOUT = 1000 def __init__(self, client, async=False, req_acks=ACK_AFTER_LOCAL_WRITE, ack_timeout=DEFAULT_ACK_TIMEOUT, batch_send=False, batch_send_every_n=BATCH_SEND_MSG_COUNT, batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL): if batch_send: async = True assert batch_send_every_n > 0 assert batch_send_every_t > 0 else: batch_send_every_n = 1 batch_send_every_t = 3600 self.client = client self.async = async self.req_acks = req_acks self.ack_timeout = ack_timeout if self.async: self.queue = Queue() # Messages are sent through this queue self.proc = Process(target=_send_upstream, args=(self.queue, self.client.copy(), batch_send_every_t, batch_send_every_n, self.req_acks, self.ack_timeout)) # Process will die if main thread exits self.proc.daemon = True self.proc.start() def send_messages(self, topic, partition, *msg): """ Helper method to send produce requests """ if self.async: for m in msg: self.queue.put((TopicAndPartition(topic, partition), create_message(m))) resp = [] else: messages = [create_message(m) for m in msg] req = ProduceRequest(topic, partition, messages) try: resp = self.client.send_produce_request([req], acks=self.req_acks, timeout=self.ack_timeout) except Exception: log.exception("Unable to send messages") raise return resp def stop(self, timeout=1): """ Stop the producer. 
Optionally wait for the specified timeout before forcefully cleaning up. """ if self.async: self.queue.put((STOP_ASYNC_PRODUCER, None)) self.proc.join(timeout) if self.proc.is_alive(): self.proc.terminate() class SimpleProducer(Producer): """ A simple, round-robbin producer. Each message goes to exactly one partition Params: client - The Kafka client instance to use async - If True, the messages are sent asynchronously via another thread (process). We will not wait for a response to these req_acks - A value indicating the acknowledgements that the server must receive before responding to the request ack_timeout - Value (in milliseconds) indicating a timeout for waiting for an acknowledgement batch_send - If True, messages are send in batches batch_send_every_n - If set, messages are send in batches of this size batch_send_every_t - If set, messages are send after this timeout """ def __init__(self, client, async=False, req_acks=Producer.ACK_AFTER_LOCAL_WRITE, ack_timeout=Producer.DEFAULT_ACK_TIMEOUT, batch_send=False, batch_send_every_n=BATCH_SEND_MSG_COUNT, batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL): self.partition_cycles = {} super(SimpleProducer, self).__init__(client, async, req_acks, ack_timeout, batch_send, batch_send_every_n, batch_send_every_t) def _next_partition(self, topic): if topic not in self.partition_cycles: if topic not in self.client.topic_partitions: self.client.load_metadata_for_topics(topic) self.partition_cycles[topic] = cycle(self.client.topic_partitions[topic]) return self.partition_cycles[topic].next() def send_messages(self, topic, *msg): partition = self._next_partition(topic) return super(SimpleProducer, self).send_messages(topic, partition, *msg) def __repr__(self): return '<SimpleProducer batch=%s>' % self.async class KeyedProducer(Producer): """ A producer which distributes messages to partitions based on the key Args: client - The kafka client instance partitioner - A partitioner class that will be used to get the partition to 
send the message to. Must be derived from Partitioner async - If True, the messages are sent asynchronously via another thread (process). We will not wait for a response to these ack_timeout - Value (in milliseconds) indicating a timeout for waiting for an acknowledgement batch_send - If True, messages are send in batches batch_send_every_n - If set, messages are send in batches of this size batch_send_every_t - If set, messages are send after this timeout """ def __init__(self, client, partitioner=None, async=False, req_acks=Producer.ACK_AFTER_LOCAL_WRITE, ack_timeout=Producer.DEFAULT_ACK_TIMEOUT, batch_send=False, batch_send_every_n=BATCH_SEND_MSG_COUNT, batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL): if not partitioner: partitioner = HashedPartitioner self.partitioner_class = partitioner self.partitioners = {} super(KeyedProducer, self).__init__(client, async, req_acks, ack_timeout, batch_send, batch_send_every_n, batch_send_every_t) def _next_partition(self, topic, key): if topic not in self.partitioners: if topic not in self.client.topic_partitions: self.client.load_metadata_for_topics(topic) self.partitioners[topic] = \ self.partitioner_class(self.client.topic_partitions[topic]) partitioner = self.partitioners[topic] return partitioner.partition(key, self.client.topic_partitions[topic]) def send(self, topic, key, msg): partition = self._next_partition(topic, key) return self.send_messages(topic, partition, msg) def __repr__(self): return '<KeyedProducer batch=%s>' % self.async
apache-2.0
stdrickforce/zerorpc-python
zerorpc/version.py
4
1465
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2015 François-Xavier Bourlet (bombela+zerorpc@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Package metadata, read by setup.py and re-exported by zerorpc/__init__.py.
__title__ = 'zerorpc'
__version__ = '0.5.2'
__author__ = 'François-Xavier Bourlet <bombela+zerorpc@gmail.com>.'
__license__ = 'MIT'
__copyright__ = ('Copyright 2015 François-Xavier Bourlet '
                 '<bombela+zerorpc@gmail.com>.')
mit
jirikuncar/invenio-upgrader
tests/test_upgrader.py
5
11320
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

""" Test unit for the miscutil/lib/inveniocfg_upgrader module. """

from datetime import date
import os
import os.path
import shutil
import sys
import tempfile

import six

from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase


def dictify(ls, value=None):
    """Turn a list into a dict.

    With ``value`` set, map every list item to that value; otherwise map
    each item's ``'id'`` key to the item itself.
    """
    if value is not None:
        return dict([(x, value) for x in ls])
    else:
        return dict([(x['id'], x) for x in ls])


def upgrades_str(ls):
    """Render a list of upgrades as a compact ``[id, id, ...]`` string."""
    # Helper to create a string out of a list of upgrades
    class Xcls(object):
        def __init__(self, id_val):
            self.id = id_val

        def __repr__(self):
            return str(self.id)

    # Use a list comprehension instead of bare map(): on Python 3 a map
    # object's str() is '<map object ...>', which would break every
    # assertEqual below. The Python 2 output is unchanged.
    return str([Xcls(x['id']) for x in ls])


class TestUpgrade(dict):
    """Minimal stand-in for an upgrade recipe (a dict with a do_upgrade)."""

    def __init__(self, node_id, depends_on, repository):
        self['id'] = node_id
        self['depends_on'] = depends_on
        self['repository'] = repository
        self['do_upgrade'] = lambda: str(node_id)


class TestInvenioUpgraderOrdering(InvenioTestCase):

    def test_normal(self):
        """ Normal dependency graph """
        from invenio_upgrader.engine import InvenioUpgrader

        upgrades = dictify([
            TestUpgrade('1', [], 'invenio'),
            TestUpgrade('2', ['1'], 'invenio'),
            TestUpgrade('3', ['1'], 'invenio'),
            TestUpgrade('4', ['2'], 'invenio'),
            TestUpgrade('5', ['3', '4'], 'invenio'),
            TestUpgrade('6', ['5', ], 'invenio'),
        ])

        m = InvenioUpgrader()
        self.assertEqual(upgrades_str(m.order_upgrades(upgrades)),
                         "[1, 2, 4, 3, 5, 6]")

    def test_two_graphs(self):
        """ Two independent graphs """
        from invenio_upgrader.engine import InvenioUpgrader

        upgrades = dictify([
            TestUpgrade('1', [], 'invenio'),
            TestUpgrade('2', ['1'], 'invenio'),
            TestUpgrade('3', ['1'], 'invenio'),
            TestUpgrade('a', [], 'other'),
            TestUpgrade('b', ['a'], 'other'),
            TestUpgrade('c', ['a'], 'other'),
            TestUpgrade('4', ['2'], 'invenio'),
            TestUpgrade('5', ['3', '4'], 'invenio'),
            TestUpgrade('6', ['5', ], 'invenio'),
            TestUpgrade('d', ['b'], 'other'),
            TestUpgrade('e', ['c', 'd'], 'other'),
            TestUpgrade('f', ['e', ], 'other'),
        ])

        m = InvenioUpgrader()
        self.assertEqual(upgrades_str(m.order_upgrades(upgrades)),
                         "[1, 2, 4, 3, 5, 6, a, b, d, c, e, f]")

    def test_cycle(self):
        """ Cycle 2, 4, 3. """
        from invenio_upgrader.engine import InvenioUpgrader

        upgrades = dictify([
            TestUpgrade('1', [], 'invenio'),
            TestUpgrade('2', ['1', '3'], 'invenio'),
            TestUpgrade('3', ['1', '4'], 'invenio'),
            TestUpgrade('4', ['2'], 'invenio'),
            TestUpgrade('5', ['3', '4'], 'invenio'),
            TestUpgrade('6', ['5', ], 'invenio'),
        ])

        m = InvenioUpgrader()
        # Exception instead of StandardError: StandardError was removed in
        # Python 3, and every StandardError is an Exception on Python 2.
        self.assertRaises(Exception, m.order_upgrades, upgrades)

    def test_missing_dependency(self):
        """ Missing dependency 0 """
        from invenio_upgrader.engine import InvenioUpgrader

        upgrades = dictify([
            TestUpgrade('1', [], 'invenio'),
            TestUpgrade('2', ['1'], 'invenio'),
            TestUpgrade('3', ['1', '0'], 'invenio'),
        ])

        m = InvenioUpgrader()
        # See test_cycle for why Exception rather than StandardError.
        self.assertRaises(Exception, m.order_upgrades, upgrades)

    def test_cross_graph_dependency(self):
        """ Missing dependency 0 """
        from invenio_upgrader.engine import InvenioUpgrader

        upgrades = dictify([
            TestUpgrade('1', [], 'invenio'),
            TestUpgrade('2', ['1'], 'invenio'),
            TestUpgrade('3', ['1', 'b'], 'invenio'),
            TestUpgrade('a', [], 'other'),
            TestUpgrade('b', ['a'], 'other'),
            TestUpgrade('c', ['2'], 'other'),
        ])

        m = InvenioUpgrader()
        # NOTE(review): cross-repository dependencies are apparently
        # allowed; an earlier revision expected them to raise instead.
        self.assertEqual(upgrades_str(m.order_upgrades(upgrades)),
                         "[1, 2, c, a, b, 3]")

    def test_history(self):
        """ History """
        from invenio_upgrader.engine import InvenioUpgrader

        upgrades = dictify([
            TestUpgrade('1', [], 'invenio'),
            TestUpgrade('2', ['1'], 'invenio'),
            TestUpgrade('3', ['1'], 'invenio'),
            TestUpgrade('4', ['2'], 'invenio'),
            TestUpgrade('5', ['3', '4'], 'invenio'),
            TestUpgrade('6', ['5', ], 'invenio'),
        ])

        history = dictify(['1', '2', '4'], value=1)

        m = InvenioUpgrader()
        self.assertEqual(upgrades_str(m.order_upgrades(upgrades, history)),
                         "[3, 5, 6]")

        history = dictify(['3', '5'], value=1)

        m = InvenioUpgrader()
        self.assertEqual(
            upgrades_str(m.order_upgrades(upgrades, history)), "[6]")


class TestInvenioUpgraderRecipe(InvenioTestCase):

    def setUp(self):
        """ Setup a test python package, to test upgrade recipe creation. """
        self.tmpdir = tempfile.mkdtemp()
        self.pkg_path = os.path.join(self.tmpdir, 'invenio_upgrader_test')
        os.makedirs(self.pkg_path)
        open(os.path.join(self.pkg_path, '__init__.py'), 'a').close()
        self.pkg_path_mymod = os.path.join(
            self.tmpdir, 'invenio_upgrader_test/mymod'
        )
        os.makedirs(self.pkg_path_mymod)
        open(os.path.join(self.pkg_path_mymod, '__init__.py'), 'a').close()

        sys.path.append(self.tmpdir)
        # Sanity check: the freshly created packages must be importable.
        import invenio_upgrader_test
        import invenio_upgrader_test.mymod

    def tearDown(self):
        """ Remove test package again """
        sys.path.remove(self.tmpdir)
        keys = []
        for m in sys.modules:
            if m.startswith('invenio_upgrader_test'):
                keys.append(m)
        for k in keys:
            del sys.modules[k]

        try:
            import invenio_upgrader_test
            raise AssertionError("Test package not removed from sys.path")
        except ImportError:
            pass

        shutil.rmtree(self.tmpdir)

    def test_create(self):
        """ Test creation of upgrades """
        from invenio_upgrader.commands import \
            cmd_upgrade_create_standard_recipe

        cmd_upgrade_create_standard_recipe(
            'invenio_upgrader_test.mymod',
            depends_on=['test1', 'test2']
        )

        # Test if upgrade can be imported
        expected_name = "mymod_%s_rename_me" % \
            date.today().strftime("%Y_%m_%d")
        import invenio_upgrader_test.mymod.upgrades
        # level=0 (absolute import) instead of the Python-2-only level=-1.
        upgrade = getattr(
            __import__(
                'invenio_upgrader_test.mymod.upgrades',
                globals(), locals(), [expected_name], 0
            ),
            expected_name
        )
        # Test API of created upgrade recipe
        assert upgrade.depends_on == ['test1', 'test2']
        assert upgrade.estimate() == 1
        assert isinstance(upgrade.info(), six.string_types)
        upgrade.pre_upgrade()
        upgrade.do_upgrade()
        upgrade.post_upgrade()

    def test_create_load_engine(self):
        """ Test creation and loading of upgrades with engine """
        from invenio_upgrader.commands import \
            cmd_upgrade_create_standard_recipe

        cmd_upgrade_create_standard_recipe(
            'invenio_upgrader_test',
            depends_on=[]
        )

        expected_name = "invenio_upgrader_test_%s_rename_me" % \
            date.today().strftime("%Y_%m_%d")

        # Test if upgrade can be found from the Upgrade
        from invenio_upgrader.engine import InvenioUpgrader
        eng = InvenioUpgrader(packages=['invenio_upgrader_test'])
        upgrades = eng.get_upgrades(remove_applied=False)

        assert len(upgrades) == 1
        assert upgrades[0]['id'] == expected_name
        assert upgrades[0]['repository'] == 'invenio_upgrader_test'

    def test_double_create(self):
        """ Test creation of upgrades """
        from invenio_upgrader.commands import \
            cmd_upgrade_create_standard_recipe

        cmd_upgrade_create_standard_recipe('invenio_upgrader_test')
        # Second call fails since module already exists, and we didn't
        # rename it yet.
        self.assertRaises(
            SystemExit,
            cmd_upgrade_create_standard_recipe,
            'invenio_upgrader_test',
        )

    def test_create_with_module(self):
        from invenio_upgrader.commands import \
            cmd_upgrade_create_standard_recipe

        # Module instead of package
        self.assertRaises(
            SystemExit,
            cmd_upgrade_create_standard_recipe,
            'invenio_upgrader.engine'
        )

    def test_invalid_path(self):
        """ Test creation of upgrades """
        from invenio_upgrader.commands import \
            cmd_upgrade_create_standard_recipe

        self.assertRaises(
            SystemExit,
            cmd_upgrade_create_standard_recipe,
            'invenio_upgrader_test',
            output_path=os.path.join(self.tmpdir, 'this_does_not_exists')
        )

    def test_create_release(self):
        """ Test creation of upgrades """
        from invenio_upgrader.engine import InvenioUpgrader
        from invenio_upgrader.commands import \
            cmd_upgrade_create_standard_recipe, \
            cmd_upgrade_create_release_recipe

        engine = InvenioUpgrader(packages=[
            'invenio_upgrader_test', 'invenio_upgrader_test.mymod'])

        cmd_upgrade_create_standard_recipe(
            'invenio_upgrader_test',
            depends_on=[]
        )
        cmd_upgrade_create_standard_recipe(
            'invenio_upgrader_test.mymod',
            depends_on=[]
        )
        cmd_upgrade_create_release_recipe(
            'invenio_upgrader_test', repository='invenio', upgrader=engine
        )

        # Find all endpoints in all repositories
        upgrades = engine.get_upgrades(remove_applied=False)
        for u in upgrades:
            if u['id'] == 'invenio_release_x_y_z':
                assert len(u['depends_on']) == 2


TEST_SUITE = make_test_suite(
    TestInvenioUpgraderOrdering,
    TestInvenioUpgraderRecipe,
)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
gpl-2.0
redhat-openstack/python-openstackclient
openstackclient/tests/identity/v3/test_endpoint.py
1
24614
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from openstackclient.identity.v3 import endpoint from openstackclient.tests import fakes from openstackclient.tests.identity.v3 import fakes as identity_fakes class TestEndpoint(identity_fakes.TestIdentityv3): def setUp(self): super(TestEndpoint, self).setUp() # Get a shortcut to the EndpointManager Mock self.endpoints_mock = self.app.client_manager.identity.endpoints self.endpoints_mock.reset_mock() # Get a shortcut to the ServiceManager Mock self.services_mock = self.app.client_manager.identity.services self.services_mock.reset_mock() def get_fake_service_name(self): return identity_fakes.service_name class TestEndpointCreate(TestEndpoint): columns = ( 'enabled', 'id', 'interface', 'region', 'service_id', 'service_name', 'service_type', 'url', ) def setUp(self): super(TestEndpointCreate, self).setUp() self.endpoints_mock.create.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ) # This is the return value for common.find_resource(service) self.services_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.SERVICE), loaded=True, ) # Get the command object to test self.cmd = endpoint.CreateEndpoint(self.app, None) def test_endpoint_create_no_options(self): arglist = [ identity_fakes.service_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, ] verifylist = [ ('enabled', True), ('service', identity_fakes.service_id), ('interface', 
identity_fakes.endpoint_interface), ('url', identity_fakes.endpoint_url), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'service': identity_fakes.service_id, 'url': identity_fakes.endpoint_url, 'interface': identity_fakes.endpoint_interface, 'enabled': True, 'region': None, } self.endpoints_mock.create.assert_called_with( **kwargs ) self.assertEqual(self.columns, columns) datalist = ( True, identity_fakes.endpoint_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_region, identity_fakes.service_id, self.get_fake_service_name(), identity_fakes.service_type, identity_fakes.endpoint_url, ) self.assertEqual(datalist, data) def test_endpoint_create_region(self): arglist = [ identity_fakes.service_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, '--region', identity_fakes.endpoint_region, ] verifylist = [ ('enabled', True), ('service', identity_fakes.service_id), ('interface', identity_fakes.endpoint_interface), ('url', identity_fakes.endpoint_url), ('region', identity_fakes.endpoint_region), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'service': identity_fakes.service_id, 'url': identity_fakes.endpoint_url, 'interface': identity_fakes.endpoint_interface, 'enabled': True, 'region': identity_fakes.endpoint_region, } self.endpoints_mock.create.assert_called_with( **kwargs ) self.assertEqual(self.columns, columns) datalist = ( True, identity_fakes.endpoint_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_region, identity_fakes.service_id, self.get_fake_service_name(), identity_fakes.service_type, identity_fakes.endpoint_url, ) self.assertEqual(datalist, data) def test_endpoint_create_enable(self): arglist = [ identity_fakes.service_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, '--enable' ] verifylist = [ ('enabled', True), ('service', identity_fakes.service_id), ('interface', identity_fakes.endpoint_interface), ('url', identity_fakes.endpoint_url), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'service': identity_fakes.service_id, 'url': identity_fakes.endpoint_url, 'interface': identity_fakes.endpoint_interface, 'enabled': True, 'region': None, } self.endpoints_mock.create.assert_called_with( **kwargs ) self.assertEqual(self.columns, columns) datalist = ( True, identity_fakes.endpoint_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_region, identity_fakes.service_id, self.get_fake_service_name(), identity_fakes.service_type, identity_fakes.endpoint_url, ) self.assertEqual(datalist, data) def test_endpoint_create_disable(self): arglist = [ identity_fakes.service_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, '--disable', ] verifylist = [ ('enabled', False), ('service', identity_fakes.service_id), ('interface', identity_fakes.endpoint_interface), ('url', identity_fakes.endpoint_url), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'service': identity_fakes.service_id, 'url': identity_fakes.endpoint_url, 'interface': identity_fakes.endpoint_interface, 'enabled': False, 'region': None, } self.endpoints_mock.create.assert_called_with( **kwargs ) self.assertEqual(self.columns, columns) datalist = ( True, identity_fakes.endpoint_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_region, identity_fakes.service_id, self.get_fake_service_name(), identity_fakes.service_type, identity_fakes.endpoint_url, ) self.assertEqual(datalist, data) class TestEndpointDelete(TestEndpoint): def setUp(self): super(TestEndpointDelete, self).setUp() # This is the return value for utils.find_resource(endpoint) self.endpoints_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ) self.endpoints_mock.delete.return_value = None # Get the command object to test self.cmd = endpoint.DeleteEndpoint(self.app, None) def test_endpoint_delete(self): arglist = [ identity_fakes.endpoint_id, ] verifylist = [ ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.endpoints_mock.delete.assert_called_with( identity_fakes.endpoint_id, ) self.assertIsNone(result) class TestEndpointList(TestEndpoint): columns = ( 'ID', 'Region', 'Service Name', 'Service Type', 'Enabled', 'Interface', 'URL', ) def setUp(self): super(TestEndpointList, self).setUp() self.endpoints_mock.list.return_value = [ fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ), ] # This is the return value for common.find_resource(service) self.services_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.SERVICE), loaded=True, ) # Get the command object to test self.cmd = endpoint.ListEndpoint(self.app, None) def test_endpoint_list_no_options(self): arglist = [] verifylist = [] 
parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) self.endpoints_mock.list.assert_called_with() self.assertEqual(self.columns, columns) datalist = ( ( identity_fakes.endpoint_id, identity_fakes.endpoint_region, self.get_fake_service_name(), identity_fakes.service_type, True, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, ), ) self.assertEqual(datalist, tuple(data)) def test_endpoint_list_service(self): arglist = [ '--service', identity_fakes.service_id, ] verifylist = [ ('service', identity_fakes.service_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'service': identity_fakes.service_id, } self.endpoints_mock.list.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( ( identity_fakes.endpoint_id, identity_fakes.endpoint_region, self.get_fake_service_name(), identity_fakes.service_type, True, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, ), ) self.assertEqual(datalist, tuple(data)) def test_endpoint_list_interface(self): arglist = [ '--interface', identity_fakes.endpoint_interface, ] verifylist = [ ('interface', identity_fakes.endpoint_interface), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'interface': identity_fakes.endpoint_interface, } self.endpoints_mock.list.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( ( identity_fakes.endpoint_id, identity_fakes.endpoint_region, self.get_fake_service_name(), identity_fakes.service_type, True, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, ), ) self.assertEqual(datalist, tuple(data)) def test_endpoint_list_region(self): arglist = [ '--region', identity_fakes.endpoint_region, ] verifylist = [ ('region', identity_fakes.endpoint_region), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'region': identity_fakes.endpoint_region, } self.endpoints_mock.list.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( ( identity_fakes.endpoint_id, identity_fakes.endpoint_region, self.get_fake_service_name(), identity_fakes.service_type, True, identity_fakes.endpoint_interface, identity_fakes.endpoint_url, ), ) self.assertEqual(datalist, tuple(data)) class TestEndpointSet(TestEndpoint): def setUp(self): super(TestEndpointSet, self).setUp() # This is the return value for utils.find_resource(endpoint) self.endpoints_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ) self.endpoints_mock.update.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ) # This is the return value for common.find_resource(service) self.services_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.SERVICE), loaded=True, ) # Get the command object to test self.cmd = endpoint.SetEndpoint(self.app, None) def 
test_endpoint_set_no_options(self): arglist = [ identity_fakes.endpoint_id, ] verifylist = [ ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.assertNotCalled(self.endpoints_mock.update) self.assertIsNone(result) def test_endpoint_set_interface(self): arglist = [ '--interface', 'public', identity_fakes.endpoint_id ] verifylist = [ ('interface', 'public'), ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'enabled': None, 'interface': 'public', 'url': None, 'region': None, 'service': None, } self.endpoints_mock.update.assert_called_with( identity_fakes.endpoint_id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_url(self): arglist = [ '--url', 'http://localhost:5000', identity_fakes.endpoint_id ] verifylist = [ ('url', 'http://localhost:5000'), ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'enabled': None, 'interface': None, 'url': 'http://localhost:5000', 'region': None, 'service': None, } self.endpoints_mock.update.assert_called_with( identity_fakes.endpoint_id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_service(self): arglist = [ '--service', identity_fakes.service_id, identity_fakes.endpoint_id ] verifylist = [ ('service', identity_fakes.service_id), ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'enabled': None, 'interface': None, 'url': None, 'region': None, 'service': identity_fakes.service_id, } self.endpoints_mock.update.assert_called_with( identity_fakes.endpoint_id, **kwargs ) self.assertIsNone(result) def 
test_endpoint_set_region(self): arglist = [ '--region', 'e-rzzz', identity_fakes.endpoint_id ] verifylist = [ ('region', 'e-rzzz'), ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'enabled': None, 'interface': None, 'url': None, 'region': 'e-rzzz', 'service': None, } self.endpoints_mock.update.assert_called_with( identity_fakes.endpoint_id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_enable(self): arglist = [ '--enable', identity_fakes.endpoint_id ] verifylist = [ ('enabled', True), ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'enabled': True, 'interface': None, 'url': None, 'region': None, 'service': None, } self.endpoints_mock.update.assert_called_with( identity_fakes.endpoint_id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_disable(self): arglist = [ '--disable', identity_fakes.endpoint_id ] verifylist = [ ('disabled', True), ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { 'enabled': False, 'interface': None, 'url': None, 'region': None, 'service': None, } self.endpoints_mock.update.assert_called_with( identity_fakes.endpoint_id, **kwargs ) self.assertIsNone(result) class TestEndpointShow(TestEndpoint): def setUp(self): super(TestEndpointShow, self).setUp() self.endpoints_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ) # This is the return value for common.find_resource(service) self.services_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.SERVICE), loaded=True, ) # Get the command object to test self.cmd = endpoint.ShowEndpoint(self.app, None) 
def test_endpoint_show(self): arglist = [ identity_fakes.endpoint_id, ] verifylist = [ ('endpoint', identity_fakes.endpoint_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. columns, data = self.cmd.take_action(parsed_args) self.endpoints_mock.get.assert_called_with( identity_fakes.endpoint_id, ) collist = ( 'enabled', 'id', 'interface', 'region', 'service_id', 'service_name', 'service_type', 'url', ) self.assertEqual(collist, columns) datalist = ( True, identity_fakes.endpoint_id, identity_fakes.endpoint_interface, identity_fakes.endpoint_region, identity_fakes.service_id, self.get_fake_service_name(), identity_fakes.service_type, identity_fakes.endpoint_url, ) self.assertEqual(datalist, data) class TestEndpointCreateServiceWithoutName(TestEndpointCreate): def setUp(self): super(TestEndpointCreate, self).setUp() self.endpoints_mock.create.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ) # This is the return value for common.find_resource(service) self.services_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.SERVICE_WITHOUT_NAME), loaded=True, ) # Get the command object to test self.cmd = endpoint.CreateEndpoint(self.app, None) def get_fake_service_name(self): return '' class TestEndpointListServiceWithoutName(TestEndpointList): def setUp(self): super(TestEndpointList, self).setUp() self.endpoints_mock.list.return_value = [ fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ), ] # This is the return value for common.find_resource(service) self.services_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.SERVICE_WITHOUT_NAME), loaded=True, ) # Get the command object to test self.cmd = endpoint.ListEndpoint(self.app, None) def get_fake_service_name(self): 
return '' class TestEndpointShowServiceWithoutName(TestEndpointShow): def setUp(self): super(TestEndpointShow, self).setUp() self.endpoints_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.ENDPOINT), loaded=True, ) # This is the return value for common.find_resource(service) self.services_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.SERVICE_WITHOUT_NAME), loaded=True, ) # Get the command object to test self.cmd = endpoint.ShowEndpoint(self.app, None) def get_fake_service_name(self): return ''
apache-2.0
wildtetris/python-social-auth
social/backends/dribbble.py
70
2218
""" Dribbble OAuth2 backend, docs at: http://psa.matiasaguirre.net/docs/backends/dribbble.html http://developer.dribbble.com/v1/oauth/ """ from social.backends.oauth import BaseOAuth2 class DribbbleOAuth2(BaseOAuth2): """Dribbble OAuth authentication backend""" name = 'dribbble' AUTHORIZATION_URL = 'https://dribbble.com/oauth/authorize' ACCESS_TOKEN_URL = 'https://dribbble.com/oauth/token' ACCESS_TOKEN_METHOD = 'POST' SCOPE_SEPARATOR = ',' EXTRA_DATA = [ ('id', 'id'), ('name', 'name'), ('html_url', 'html_url'), ('avatar_url', 'avatar_url'), ('bio', 'bio'), ('location', 'location'), ('links', 'links'), ('buckets_count', 'buckets_count'), ('comments_received_count', 'comments_received_count'), ('followers_count', 'followers_count'), ('followings_count', 'followings_count'), ('likes_count', 'likes_count'), ('likes_received_count', 'likes_received_count'), ('projects_count', 'projects_count'), ('rebounds_received_count', 'rebounds_received_count'), ('shots_count', 'shots_count'), ('teams_count', 'teams_count'), ('pro', 'pro'), ('buckets_url', 'buckets_url'), ('followers_url', 'followers_url'), ('following_url', 'following_url'), ('likes_url', 'shots_url'), ('teams_url', 'teams_url'), ('created_at', 'created_at'), ('updated_at', 'updated_at'), ] def get_user_details(self, response): """Return user details from Dribbble account""" fullname, first_name, last_name = self.get_user_names( response.get('name') ) return {'username': response.get('username'), 'email': response.get('email', ''), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name} def user_data(self, access_token, *args, **kwargs): """Loads user data from service""" return self.get_json( 'https://api.dribbble.com/v1/user', headers={ 'Authorization': ' Bearer {0}'.format(access_token) })
bsd-3-clause
SerpentCS/odoo
addons/sale/report/sale_report.py
111
5981
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import tools
from openerp.osv import fields, osv


class sale_report(osv.osv):
    """Read-only sales-order statistics model.

    Backed by a SQL view (created in init()) rather than a real table
    (_auto = False); every column is readonly and aggregated per order
    line / order / partner / salesperson.
    """
    _name = "sale.report"
    _description = "Sales Orders Statistics"
    _auto = False           # no table is created; init() builds a view instead
    _rec_name = 'date'
    _columns = {
        'date': fields.datetime('Date Order', readonly=True),  # TDE FIXME master: rename into date_order
        'date_confirm': fields.date('Date Confirm', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', readonly=True),
        'product_uom': fields.many2one('product.uom', 'Unit of Measure', readonly=True),
        'product_uom_qty': fields.float('# of Qty', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
        'price_total': fields.float('Total Price', readonly=True),
        'delay': fields.float('Commitment Delay', digits=(16,2), readonly=True),
        'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
        'nbr': fields.integer('# of Lines', readonly=True),  # TDE FIXME master: rename into nbr_lines
        'state': fields.selection([
            ('cancel', 'Cancelled'),
            ('draft', 'Draft'),
            ('confirmed', 'Confirmed'),
            ('exception', 'Exception'),
            ('done', 'Done')], 'Order Status', readonly=True),
        'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', readonly=True),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
    _order = 'date desc'

    def _select(self):
        """Return the SELECT part of the view query.

        The leading CTE resolves, for each currency rate, the date range
        it is valid for; prices are converted with that rate in
        price_total.
        """
        select_str = """
            WITH currency_rate (currency_id, rate, date_start, date_end) AS (
                SELECT r.currency_id, r.rate, r.name AS date_start,
                    (SELECT name FROM res_currency_rate r2
                     WHERE r2.name > r.name AND
                           r2.currency_id = r.currency_id
                     ORDER BY r2.name ASC
                     LIMIT 1) AS date_end
                FROM res_currency_rate r
            )
             SELECT min(l.id) as id,
                    l.product_id as product_id,
                    t.uom_id as product_uom,
                    sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,
                    sum(l.product_uom_qty * cr.rate * l.price_unit * (100.0-l.discount) / 100.0) as price_total,
                    count(*) as nbr,
                    s.date_order as date,
                    s.date_confirm as date_confirm,
                    s.partner_id as partner_id,
                    s.user_id as user_id,
                    s.company_id as company_id,
                    extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,
                    l.state,
                    t.categ_id as categ_id,
                    s.pricelist_id as pricelist_id,
                    s.project_id as analytic_account_id,
                    s.section_id as section_id
        """
        return select_str

    def _from(self):
        """Return the FROM part of the view query (order lines joined to
        orders, products, UoM conversion factors and currency rates)."""
        from_str = """
                sale_order_line l
                      join sale_order s on (l.order_id=s.id)
                      left join product_product p on (l.product_id=p.id)
                          left join product_template t on (p.product_tmpl_id=t.id)
                      left join product_uom u on (u.id=l.product_uom)
                      left join product_uom u2 on (u2.id=t.uom_id)
                      left join product_pricelist pp on (s.pricelist_id = pp.id)
                      join currency_rate cr on (cr.currency_id = pp.currency_id and
                          cr.date_start <= coalesce(s.date_order, now()) and
                          (cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
        """
        return from_str

    def _group_by(self):
        """Return the GROUP BY part of the view query; one row per
        (product, order, uom, categ, dates, partner, user, company,
        state, pricelist, analytic account, sales team) combination."""
        group_by_str = """
            GROUP BY l.product_id,
                    l.order_id,
                    t.uom_id,
                    t.categ_id,
                    s.date_order,
                    s.date_confirm,
                    s.partner_id,
                    s.user_id,
                    s.company_id,
                    l.state,
                    s.pricelist_id,
                    s.project_id,
                    s.section_id
        """
        return group_by_str

    def init(self, cr):
        """(Re)create the backing SQL view for this reporting model."""
        # self._table = sale_report
        tools.drop_view_if_exists(cr, self._table)
        cr.execute("""CREATE or REPLACE VIEW %s as (
            %s
            FROM ( %s )
            %s
            )""" % (self._table, self._select(), self._from(), self._group_by()))

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
osvalr/odoo
addons/auth_crypt/__init__.py
435
1050
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import auth_crypt # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
begoldsm/autorest
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyComplex/autorestcomplextestservice/operations/polymorphism_operations.py
14
7875
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.pipeline import ClientRawResponse from .. import models class PolymorphismOperations(object): """PolymorphismOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.config = config def get_valid( self, custom_headers=None, raw=False, **operation_config): """Get complex types that are polymorphic. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:rtype: :class:`Fish <fixtures.acceptancetestsbodycomplex.models.Fish>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`ErrorException<fixtures.acceptancetestsbodycomplex.models.ErrorException>` """ # Construct URL url = '/complex/polymorphism/valid' # Construct parameters query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('Fish', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def put_valid( self, complex_body, custom_headers=None, raw=False, **operation_config): """Put complex types that are polymorphic. 
:param complex_body: Please put a salmon that looks like this: { 'fishtype':'Salmon', 'location':'alaska', 'iswild':true, 'species':'king', 'length':1.0, 'siblings':[ { 'fishtype':'Shark', 'age':6, 'birthday': '2012-01-05T01:00:00Z', 'length':20.0, 'species':'predator', }, { 'fishtype':'Sawshark', 'age':105, 'birthday': '1900-01-05T01:00:00Z', 'length':10.0, 'picture': new Buffer([255, 255, 255, 255, 254]).toString('base64'), 'species':'dangerous', }, { 'fishtype': 'goblin', 'age': 1, 'birthday': '2015-08-08T00:00:00Z', 'length': 30.0, 'species': 'scary', 'jawsize': 5 } ] }; :type complex_body: :class:`Fish <fixtures.acceptancetestsbodycomplex.models.Fish>` :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: None :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`ErrorException<fixtures.acceptancetestsbodycomplex.models.ErrorException>` """ # Construct URL url = '/complex/polymorphism/valid' # Construct parameters query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(complex_body, 'Fish') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def put_valid_missing_required( self, complex_body, custom_headers=None, raw=False, **operation_config): """Put complex types that are polymorphic, attempting to omit required 'birthday' field 
- the request should not be allowed from the client. :param complex_body: Please attempt put a sawshark that looks like this, the client should not allow this data to be sent: { "fishtype": "sawshark", "species": "snaggle toothed", "length": 18.5, "age": 2, "birthday": "2013-06-01T01:00:00Z", "location": "alaska", "picture": base64(FF FF FF FF FE), "siblings": [ { "fishtype": "shark", "species": "predator", "birthday": "2012-01-05T01:00:00Z", "length": 20, "age": 6 }, { "fishtype": "sawshark", "species": "dangerous", "picture": base64(FF FF FF FF FE), "length": 10, "age": 105 } ] } :type complex_body: :class:`Fish <fixtures.acceptancetestsbodycomplex.models.Fish>` :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :rtype: None :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`ErrorException<fixtures.acceptancetestsbodycomplex.models.ErrorException>` """ # Construct URL url = '/complex/polymorphism/missingrequired/invalid' # Construct parameters query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(complex_body, 'Fish') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response
mit
adw0rd/lettuce
tests/integration/lib/Django-1.2.5/django/contrib/gis/db/backends/util.py
377
1749
""" A collection of utility routines and classes used by the spatial backends. """ def gqn(val): """ The geographic quote name function; used for quoting tables and geometries (they use single rather than the double quotes of the backend quotename function). """ if isinstance(val, basestring): if isinstance(val, unicode): val = val.encode('ascii') return "'%s'" % val else: return str(val) class SpatialOperation(object): """ Base class for generating spatial SQL. """ sql_template = '%(geo_col)s %(operator)s %(geometry)s' def __init__(self, function='', operator='', result='', **kwargs): self.function = function self.operator = operator self.result = result self.extra = kwargs def as_sql(self, geo_col, geometry='%s'): return self.sql_template % self.params(geo_col, geometry) def params(self, geo_col, geometry): params = {'function' : self.function, 'geo_col' : geo_col, 'geometry' : geometry, 'operator' : self.operator, 'result' : self.result, } params.update(self.extra) return params class SpatialFunction(SpatialOperation): """ Base class for generating spatial SQL related to a function. """ sql_template = '%(function)s(%(geo_col)s, %(geometry)s)' def __init__(self, func, result='', operator='', **kwargs): # Getting the function prefix. default = {'function' : func, 'operator' : operator, 'result' : result } kwargs.update(default) super(SpatialFunction, self).__init__(**kwargs)
gpl-3.0
bdastur/notes
kubernetes/utils.py
1
1137
#!/usr/bin/env python
"""Small helpers for running kubectl commands and parsing their JSON output."""
import os
import subprocess
import json


class Command(object):
    """Runs a command with the KUBECONFIG environment variable set."""

    def __init__(self, kubeconfig=None):
        # NOTE(review): the original printed and returned on a missing
        # kubeconfig, leaving a half-constructed object (execute_cmd would
        # later fail with AttributeError).  Behavior kept so existing
        # callers are unaffected; consider raising ValueError instead.
        if kubeconfig is None:
            print("Require a kubeconfig file")
            return
        self.kubeconfig = kubeconfig

    def execute_cmd(self, cmd):
        """Run *cmd* (a whitespace-separated command string) and parse stdout.

        Returns the decoded JSON document as a dict, or {} when the
        output is not valid JSON.
        """
        argv = cmd.split()
        myenv = os.environ.copy()
        myenv["KUBECONFIG"] = self.kubeconfig
        proc = subprocess.Popen(argv, env=myenv, stdout=subprocess.PIPE)
        outs, _errs = proc.communicate()  # stderr not captured; _errs is None
        print("outs:", outs)
        jdata = {}
        try:
            jdata = json.loads(outs)
            # Fixed: this line used a Python 2 print statement
            # (`print "jdata: ", jdata`), a SyntaxError under Python 3,
            # while the rest of the file already used the print() function.
            print("jdata: ", jdata)
        except ValueError:
            print("No json data")
        return jdata


class KubeParser(object):
    """Helpers for summarizing parsed kubectl JSON documents."""

    @staticmethod
    def pods_summary(json_data):
        """Print the name of every pod in a `kubectl get pods -o json` dict."""
        for item in json_data['items']:
            # Fixed: Python 2 print statement -> print() function.
            print(item['metadata']['name'])


def main():
    """Ad-hoc driver: list pods using a hard-coded kubeconfig path."""
    print("Main")
    kc = Command("/Users/behzad.dastur/clusters/ilmtest/kubeconfig")
    jdata = kc.execute_cmd("kubectl get pods -o json")
    kp = KubeParser()
    kp.pods_summary(jdata)


if __name__ == "__main__":
    main()
apache-2.0
denny820909/builder
lib/python2.7/site-packages/SQLAlchemy-0.8.0b2-py2.7-linux-x86_64.egg/sqlalchemy/dialects/oracle/zxjdbc.py
6
7743
# oracle/zxjdbc.py # Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: oracle+zxjdbc :name: zxJDBC for Jython :dbapi: zxjdbc :connectstring: oracle+zxjdbc://user:pass@host/dbname :driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html. """ import decimal import re from sqlalchemy import sql, types as sqltypes, util from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext from sqlalchemy.engine import result as _result from sqlalchemy.sql import expression import collections SQLException = zxJDBC = None class _ZxJDBCDate(sqltypes.Date): def result_processor(self, dialect, coltype): def process(value): if value is None: return None else: return value.date() return process class _ZxJDBCNumeric(sqltypes.Numeric): def result_processor(self, dialect, coltype): #XXX: does the dialect return Decimal or not??? 
# if it does (in all cases), we could use a None processor as well as # the to_float generic processor if self.asdecimal: def process(value): if isinstance(value, decimal.Decimal): return value else: return decimal.Decimal(str(value)) else: def process(value): if isinstance(value, decimal.Decimal): return float(value) else: return value return process class OracleCompiler_zxjdbc(OracleCompiler): def returning_clause(self, stmt, returning_cols): self.returning_cols = list(expression._select_iterables(returning_cols)) # within_columns_clause=False so that labels (foo AS bar) don't render columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) for c in self.returning_cols] if not hasattr(self, 'returning_parameters'): self.returning_parameters = [] binds = [] for i, col in enumerate(self.returning_cols): dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi) self.returning_parameters.append((i + 1, dbtype)) bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype)) self.binds[bindparam.key] = bindparam binds.append(self.bindparam_string(self._truncate_bindparam(bindparam))) return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) class OracleExecutionContext_zxjdbc(OracleExecutionContext): def pre_exec(self): if hasattr(self.compiled, 'returning_parameters'): # prepare a zxJDBC statement so we can grab its underlying # OraclePreparedStatement's getReturnResultSet later self.statement = self.cursor.prepare(self.statement) def get_result_proxy(self): if hasattr(self.compiled, 'returning_parameters'): rrs = None try: try: rrs = self.statement.__statement__.getReturnResultSet() rrs.next() except SQLException, sqle: msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode()) if sqle.getSQLState() is not None: msg += ' [SQLState: %s]' % sqle.getSQLState() raise zxJDBC.Error(msg) else: row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype) for index, dbtype in 
self.compiled.returning_parameters) return ReturningResultProxy(self, row) finally: if rrs is not None: try: rrs.close() except SQLException: pass self.statement.close() return _result.ResultProxy(self) def create_cursor(self): cursor = self._dbapi_connection.cursor() cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) return cursor class ReturningResultProxy(_result.FullyBufferedResultProxy): """ResultProxy backed by the RETURNING ResultSet results.""" def __init__(self, context, returning_row): self._returning_row = returning_row super(ReturningResultProxy, self).__init__(context) def _cursor_description(self): ret = [] for c in self.context.compiled.returning_cols: if hasattr(c, 'name'): ret.append((c.name, c.type)) else: ret.append((c.anon_label, c.type)) return ret def _buffer_rows(self): return collections.deque([self._returning_row]) class ReturningParam(object): """A bindparam value representing a RETURNING parameter. Specially handled by OracleReturningDataHandler. 
""" def __init__(self, type): self.type = type def __eq__(self, other): if isinstance(other, ReturningParam): return self.type == other.type return NotImplemented def __ne__(self, other): if isinstance(other, ReturningParam): return self.type != other.type return NotImplemented def __repr__(self): kls = self.__class__ return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self), self.type) class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect): jdbc_db_name = 'oracle' jdbc_driver_name = 'oracle.jdbc.OracleDriver' statement_compiler = OracleCompiler_zxjdbc execution_ctx_cls = OracleExecutionContext_zxjdbc colspecs = util.update_copy( OracleDialect.colspecs, { sqltypes.Date: _ZxJDBCDate, sqltypes.Numeric: _ZxJDBCNumeric } ) def __init__(self, *args, **kwargs): super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs) global SQLException, zxJDBC from java.sql import SQLException from com.ziclix.python.sql import zxJDBC from com.ziclix.python.sql.handler import OracleDataHandler class OracleReturningDataHandler(OracleDataHandler): """zxJDBC DataHandler that specially handles ReturningParam.""" def setJDBCObject(self, statement, index, object, dbtype=None): if type(object) is ReturningParam: statement.registerReturnParameter(index, object.type) elif dbtype is None: OracleDataHandler.setJDBCObject( self, statement, index, object) else: OracleDataHandler.setJDBCObject( self, statement, index, object, dbtype) self.DataHandler = OracleReturningDataHandler def initialize(self, connection): super(OracleDialect_zxjdbc, self).initialize(connection) self.implicit_returning = connection.connection.driverversion >= '10.2' def _create_jdbc_url(self, url): return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database) def _get_server_version_info(self, connection): version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1) return tuple(int(x) for x in version.split('.')) dialect = OracleDialect_zxjdbc
mit
anaran/kuma
kuma/wiki/constants.py
4
12866
"""Constants for the kuma wiki app.

Bleach sanitization whitelists (tags, attributes, CSS styles), review and
localization flag tags, reserved-slug patterns, and assorted regexes and
cache-key templates used by the wiki views and models.
"""
import re

import bleach
from django.utils.translation import ugettext_lazy as _


# Tags allowed through bleach sanitization, on top of bleach's defaults.
# FIX: entries that were listed twice ('table', 'samp', 'var') were removed;
# the duplicates had no effect on the whitelist, they were just noise.
ALLOWED_TAGS = bleach.ALLOWED_TAGS + [
    'div', 'span', 'p', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
    'pre', 'code', 'cite',
    'dl', 'dt', 'dd', 'small', 'sub', 'sup', 'u', 'strike', 'samp', 'abbr',
    'ul', 'ol', 'li',
    'nobr', 'dfn', 'caption', 'var', 's',
    'i', 'img', 'hr',
    'input', 'label', 'select', 'option', 'textarea',
    # Note: <iframe> is allowed, but src="" is pre-filtered before bleach
    'iframe',
    'table', 'tbody', 'thead', 'tfoot', 'tr', 'th', 'td', 'colgroup', 'col',
    'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
    'figcaption', 'dialog', 'hgroup', 'mark', 'time', 'meter', 'command',
    'output', 'progress', 'audio', 'video', 'details', 'summary', 'datagrid',
    'datalist', 'address', 'font',
    'bdi', 'bdo', 'del', 'ins', 'kbd',
    'ruby', 'rp', 'rt', 'q',
    # MathML
    'math', 'maction', 'menclose', 'merror', 'mfenced', 'mfrac', 'mglyph',
    'mi', 'mlabeledtr', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
    'mphantom', 'mroot', 'mrow', 'ms', 'mspace', 'msqrt', 'mstyle',
    'msub', 'msup', 'msubsup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
    'munderover', 'none', 'mprescripts', 'semantics', 'annotation',
    'annotation-xml',
]

# Per-tag attribute whitelist.
# FIX: take a copy instead of aliasing bleach.ALLOWED_ATTRIBUTES -- the
# original rebound the name to bleach's module-level default dict and then
# mutated it in place, leaking kuma-specific attributes into every other
# bleach user in the same process.
ALLOWED_ATTRIBUTES = dict(bleach.ALLOWED_ATTRIBUTES)
ALLOWED_ATTRIBUTES['*'] = ['lang']
# Note: <iframe> is allowed, but src="" is pre-filtered before bleach
ALLOWED_ATTRIBUTES['iframe'] = ['id', 'src', 'sandbox', 'seamless',
                                'frameborder', 'width', 'height', 'class']
ALLOWED_ATTRIBUTES['p'] = ['style', 'class', 'id', 'align', 'lang', 'dir']
ALLOWED_ATTRIBUTES['span'] = ['style', 'class', 'id', 'title', 'lang', 'dir']
ALLOWED_ATTRIBUTES['abbr'] = ['style', 'class', 'id', 'title', 'lang', 'dir']
ALLOWED_ATTRIBUTES['img'] = ['src', 'id', 'align', 'alt', 'class', 'is',
                             'title', 'style', 'lang', 'dir', 'width',
                             'height']
ALLOWED_ATTRIBUTES['a'] = ['style', 'id', 'class', 'href', 'title', 'lang',
                           'name', 'dir', 'hreflang', 'rel']
ALLOWED_ATTRIBUTES['i'] = ['class']
ALLOWED_ATTRIBUTES['td'] = ['style', 'id', 'class', 'colspan', 'rowspan',
                            'lang', 'dir']
ALLOWED_ATTRIBUTES['th'] = ['style', 'id', 'class', 'colspan', 'rowspan',
                            'scope', 'lang', 'dir']
ALLOWED_ATTRIBUTES['video'] = ['style', 'id', 'class', 'lang', 'src',
                               'controls', 'dir']
ALLOWED_ATTRIBUTES['font'] = ['color', 'face', 'size', 'dir']
ALLOWED_ATTRIBUTES['details'] = ['open']
ALLOWED_ATTRIBUTES['select'] = ['name', 'dir']
ALLOWED_ATTRIBUTES['option'] = ['value', 'selected', 'dir']
ALLOWED_ATTRIBUTES['ol'] = ['style', 'class', 'id', 'lang', 'start', 'dir']
ALLOWED_ATTRIBUTES.update(dict((x, ['style', 'class', 'id', 'name', 'lang',
                                    'dir'])
                               for x in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')))
ALLOWED_ATTRIBUTES.update(dict((x, ['style', 'class', 'id', 'lang', 'dir',
                                    'title'])
                               for x in (
    'div', 'pre', 'ul', 'li', 'code', 'dl', 'dt', 'dd', 'section',
    'header', 'footer', 'nav', 'article', 'aside', 'figure', 'dialog',
    'hgroup', 'mark', 'time', 'meter', 'command', 'output', 'progress',
    'audio', 'details', 'datagrid', 'datalist', 'table', 'tr', 'address',
    'col', 's', 'strong'
)))
ALLOWED_ATTRIBUTES.update(dict((x, ['cite']) for x in (
    'blockquote', 'del', 'ins', 'q'
)))
ALLOWED_ATTRIBUTES['li'] += ['data-default-state']
ALLOWED_ATTRIBUTES['time'] += ['datetime']
# FIX: 'ins' and 'del' were *reassigned* to ['datetime'] here, silently
# clobbering the ['cite'] whitelist that was just set above; extend instead.
ALLOWED_ATTRIBUTES['ins'] += ['datetime']
ALLOWED_ATTRIBUTES['del'] += ['datetime']

# MathML
ALLOWED_ATTRIBUTES.update(dict((x, ['encoding', 'src']) for x in (
    'annotation', 'annotation-xml')))
ALLOWED_ATTRIBUTES.update(
    dict((x, ['href', 'mathbackground', 'mathcolor', 'id', 'class', 'style'])
         for x in ('math', 'maction', 'menclose', 'merror', 'mfenced',
                   'mfrac', 'mglyph', 'mi', 'mlabeledtr', 'mmultiscripts',
                   'mn', 'mo', 'mover', 'mpadded', 'mphantom', 'mroot',
                   'mrow', 'ms', 'mspace', 'msqrt', 'mstyle', 'msub', 'msup',
                   'msubsup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
                   'munderover', 'none', 'mprescripts')))
ALLOWED_ATTRIBUTES['math'] += [
    'display', 'dir', 'selection', 'notation', 'close', 'open',
    'separators', 'bevelled', 'denomalign', 'linethickness', 'numalign',
    'largeop', 'maxsize', 'minsize', 'movablelimits', 'rspace', 'separator',
    'stretchy', 'symmetric', 'depth', 'lquote', 'rquote', 'align',
    'columnlines', 'frame', 'rowalign', 'rowspacing', 'rowspan',
    'columnspan', 'accent', 'accentunder', 'dir', 'mathsize', 'mathvariant',
    'subscriptshift', 'supscriptshift', 'scriptlevel', 'displaystyle',
    'scriptsizemultiplier', 'scriptminsize', 'altimg', 'altimg-width',
    'altimg-height', 'altimg-valign', 'alttext']
ALLOWED_ATTRIBUTES['maction'] += ['actiontype', 'selection']
ALLOWED_ATTRIBUTES['menclose'] += ['notation']
ALLOWED_ATTRIBUTES['mfenced'] += ['close', 'open', 'separators']
ALLOWED_ATTRIBUTES['mfrac'] += ['bevelled', 'denomalign', 'linethickness',
                                'numalign']
# FIX: a second, redundant "['mi'] += ['mathsize', 'mathvariant']" line in
# the original only produced duplicate list entries and was removed.
ALLOWED_ATTRIBUTES['mi'] += ['dir', 'mathsize', 'mathvariant']
ALLOWED_ATTRIBUTES['mmultiscripts'] += ['subscriptshift', 'superscriptshift']
ALLOWED_ATTRIBUTES['mo'] += ['largeop', 'lspace', 'maxsize', 'minsize',
                             'movablelimits', 'rspace', 'separator',
                             'stretchy', 'symmetric', 'accent', 'dir',
                             'mathsize', 'mathvariant']
ALLOWED_ATTRIBUTES['mover'] += ['accent']
ALLOWED_ATTRIBUTES['mpadded'] += ['lspace', 'voffset', 'depth']
ALLOWED_ATTRIBUTES['mrow'] += ['dir']
ALLOWED_ATTRIBUTES['ms'] += ['lquote', 'rquote', 'dir', 'mathsize',
                             'mathvariant']
ALLOWED_ATTRIBUTES['mspace'] += ['depth', 'height', 'width']
ALLOWED_ATTRIBUTES['mstyle'] += [
    'display', 'dir', 'selection', 'notation', 'close', 'open',
    'separators', 'bevelled', 'denomalign', 'linethickness', 'numalign',
    'largeop', 'maxsize', 'minsize', 'movablelimits', 'rspace', 'separator',
    'stretchy', 'symmetric', 'depth', 'lquote', 'rquote', 'align',
    'columnlines', 'frame', 'rowalign', 'rowspacing', 'rowspan',
    'columnspan', 'accent', 'accentunder', 'dir', 'mathsize', 'mathvariant',
    'subscriptshift', 'supscriptshift', 'scriptlevel', 'displaystyle',
    'scriptsizemultiplier', 'scriptminsize']
ALLOWED_ATTRIBUTES['msub'] += ['subscriptshift']
ALLOWED_ATTRIBUTES['msubsup'] += ['subscriptshift', 'superscriptshift']
ALLOWED_ATTRIBUTES['msup'] += ['superscriptshift']
ALLOWED_ATTRIBUTES['mtable'] += ['align', 'columnalign', 'columnlines',
                                 'frame', 'rowalign', 'rowspacing',
                                 'rowlines']
ALLOWED_ATTRIBUTES['mtd'] += ['columnalign', 'columnspan', 'rowalign',
                              'rowspan']
ALLOWED_ATTRIBUTES['mtext'] += ['dir', 'mathsize', 'mathvariant']
ALLOWED_ATTRIBUTES['mtr'] += ['columnalign', 'rowalign']
ALLOWED_ATTRIBUTES['munder'] += ['accentunder']
# FIX: was assigned to the misspelled key 'mundermover', which matches no
# MathML tag and so never took effect; 'munderover' (whitelisted above) is
# the element these attributes belong to.
ALLOWED_ATTRIBUTES['munderover'] += ['accent', 'accentunder']

# CSS properties allowed in style="" attributes.
ALLOWED_STYLES = [
    'border', 'border-top', 'border-right', 'border-bottom', 'border-left',
    'float', 'overflow', 'min-height', 'vertical-align', 'white-space',
    'color', 'border-radius', '-webkit-border-radius',
    # FIX: these two prefixed properties were fused into a single
    # comma-containing string and therefore never matched anything.
    '-moz-border-radius', '-o-border-radius',
    'margin', 'margin-left', 'margin-top', 'margin-bottom', 'margin-right',
    'padding', 'padding-left', 'padding-top', 'padding-bottom',
    'padding-right', 'position', 'top', 'height', 'left', 'right',
    'background',  # TODO: Maybe not this one, it can load URLs
    'background-color',
    'font', 'font-size', 'font-weight', 'font-family', 'font-variant',
    'text-align', 'text-transform',
    '-moz-column-width', '-webkit-columns', 'columns', 'width',
    'list-style-type', 'line-height',
    # CSS properties needed for live examples (pending proper solution):
    'backface-visibility', '-moz-backface-visibility',
    '-webkit-backface-visibility', '-o-backface-visibility',
    'perspective', '-moz-perspective', '-webkit-perspective',
    '-o-perspective',
    'perspective-origin', '-moz-perspective-origin',
    '-webkit-perspective-origin', '-o-perspective-origin',
    'transform', '-moz-transform', '-webkit-transform', '-o-transform',
    'transform-style', '-moz-transform-style', '-webkit-transform-style',
    '-o-transform-style',
    'columns', '-moz-columns', '-webkit-columns',
    'column-rule', '-moz-column-rule', '-webkit-column-rule',
    'column-width', '-moz-column-width', '-webkit-column-width',
    'image-rendering', '-ms-interpolation-mode',
    'position', 'border-style', 'background-clip',
    'border-bottom-right-radius', 'border-bottom-left-radius',
    'border-top-right-radius', 'border-top-left-radius',
    'border-bottom-style', 'border-left-style', 'border-right-style',
    'border-top-style',
    'border-bottom-width', 'border-left-width', 'border-right-width',
    'border-top-width',
    'vertical-align', 'border-collapse', 'border-width', 'border-color',
    'border-left', 'border-right', 'border-bottom', 'border-top',
    'clip', 'cursor', 'filter', 'float', 'max-width', 'font-style',
    'letter-spacing', 'opacity', 'zoom',
    'text-overflow', 'text-indent', 'text-rendering', 'text-shadow',
    # FIX: 'transition' was listed four times verbatim; the repeats were
    # clearly meant to be the vendor-prefixed forms, matching the
    # transition-delay/-duration/-property entries just below.
    'transition', '-moz-transition', '-webkit-transition', '-o-transition',
    'transition-delay', '-moz-transition-delay', '-webkit-transition-delay',
    '-o-transition-delay',
    'transition-duration', '-moz-transition-duration',
    '-webkit-transition-duration', '-o-transition-duration',
    'transition-property', '-moz-transition-property',
    '-webkit-transition-property', '-o-transition-property',
    'transition-timing-function', '-moz-transition-timing-function',
    '-webkit-transition-timing-function', '-o-transition-timing-function',
    'color', 'display', 'position',
    'outline-color', 'outline', 'outline-offset',
    'box-shadow', '-moz-box-shadow', '-webkit-box-shadow', '-o-box-shadow',
    'linear-gradient', '-moz-linear-gradient', '-webkit-linear-gradient',
    'radial-gradient', '-moz-radial-gradient', '-webkit-radial-gradient',
    'text-decoration-style', '-moz-text-decoration-style', 'text-decoration',
    'direction', 'white-space', 'unicode-bidi', 'word-wrap'
]

# Column at which revision diffs are wrapped for display.
DIFF_WRAP_COLUMN = 65

TEMPLATE_TITLE_PREFIX = 'Template:'
DOCUMENTS_PER_PAGE = 100

# Synthetic kumascript error payload used when the service times out.
KUMASCRIPT_TIMEOUT_ERROR = [
    {"level": "error",
     "message": "Request to Kumascript service timed out",
     "args": ["TimeoutError"]}
]

# TODO: Put this under the control of Constance / Waffle?
# Flags used to signify revisions in need of review
REVIEW_FLAG_TAGS = (
    ('technical', _('Technical - code samples, APIs, or technologies')),
    ('editorial', _('Editorial - prose, grammar, or content')),
)
REVIEW_FLAG_TAGS_DEFAULT = ['technical', 'editorial']

LOCALIZATION_FLAG_TAGS = (
    ('inprogress', _('Localization in progress - not completely translated yet.')),
)

# TODO: This is info derived from urls.py, but unsure how to DRY it
RESERVED_SLUGS = (
    r'ckeditor_config\.js$',
    r'watch-ready-for-review$',
    r'unwatch-ready-for-review$',
    r'watch-approved$',
    r'unwatch-approved$',
    r'\.json$',
    r'new$',
    r'all$',
    r'templates$',
    r'preview-wiki-content$',
    r'category/\d+$',
    r'needs-review/?[^/]+$',
    r'needs-review/?',
    r'feeds/[^/]+/all/?',
    r'feeds/[^/]+/needs-review/[^/]+$',
    r'feeds/[^/]+/needs-review/?',
    r'tag/[^/]+'
)
RESERVED_SLUGS_RES = [re.compile(pattern) for pattern in RESERVED_SLUGS]

# Strips an optional leading "/", locale prefix and "docs/" from a slug.
# FIX: the class was '[A-z-]', which in addition to letters matches the
# characters between 'Z' and 'a' ('[', '\', ']', '^', '_', '`');
# '[A-Za-z-]' is the intended letters-and-hyphen range.
SLUG_CLEANSING_RE = re.compile(r'^\/?(([A-Za-z-]+)?\/?docs\/)?')
# ?, whitespace, percentage, quote disallowed in slugs altogether.
# (The doubled '%%' of the original was redundant inside a character class
# and has been collapsed to a single '%'; the matched set is unchanged.)
INVALID_DOC_SLUG_CHARS_RE = re.compile(r"""[\s'"%\?\$]+""")
INVALID_REV_SLUG_CHARS_RE = re.compile(r"""[\s\?\/%]+""")
DOCUMENT_PATH_RE = re.compile(r'[^\$]+')

# how a redirect looks as rendered HTML
REDIRECT_HTML = 'REDIRECT <a class="redirect"'
REDIRECT_CONTENT = 'REDIRECT <a class="redirect" href="%(href)s">%(title)s</a>'

# Cache-key template for a document's last-modified timestamp.
DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL = u'kuma:document-last-modified:%s'

# Legacy MindTouch (Deki) and current Kuma attachment URL patterns.
DEKI_FILE_URL = re.compile(r'@api/deki/files/(?P<file_id>\d+)/=')
KUMA_FILE_URL = re.compile(r'/files/(?P<file_id>\d+)/.+\..+')

SPAM_EXEMPTED_FLAG = 'wiki_spam_exempted'
mpl-2.0
I-TECH-UW/mwachx
utils/forms.py
2
1408
import json, datetime

from django import forms
from django.utils.safestring import mark_safe
from django.utils.html import format_html

import utils


class Html5DateInput(forms.DateInput):
    """DateInput rendered as a native HTML5 <input type="date">."""
    input_type = 'date'


class AngularPopupDatePicker(forms.DateInput):
    """DateInput wired up to an Angular datepicker-popup directive.

    ``max``/``min`` bound the selectable range (date-likes or day deltas,
    see ``convert_date``); the popup open/close state is bound to
    ``status.<field name>`` in the Angular scope.
    """

    def __init__(self, attrs=None, max=None, min=None):
        widget_attrs = attrs if attrs is not None else {}
        # Optional range bounds, pre-formatted for the Angular directive.
        if max is not None:
            widget_attrs['max-date'] = convert_date(max)
        if min is not None:
            widget_attrs['min-date'] = convert_date(min)
        # Directive attributes; the '{name}' placeholder is substituted in
        # render() once the bound field name is known.
        widget_attrs['datepicker-popup'] = True
        widget_attrs['is-open'] = 'status.{name}'
        widget_attrs['placeholder'] = 'yyyy-MM-dd'
        super(AngularPopupDatePicker, self).__init__(widget_attrs)

    def render(self, name, value, attrs=None):
        # Render the bare <input>, then fill the '{name}' placeholders that
        # were baked into the widget attributes at construction time.
        widget_html = super(AngularPopupDatePicker, self).render(
            name, value, attrs)
        widget_html = format_html(widget_html, name=name)
        # Wrap the input with a Bootstrap input-group and a calendar button
        # that toggles the popup's open state.
        wrapper = (
            '<p class="input-group">{input_str}<span class="input-group-btn">'
            '<button type="button" class="btn btn-default" '
            'ng-click="status.{name} = !status.{name}">'
            '<i class="glyphicon glyphicon-calendar"></i></button></span>'
        )
        return format_html(wrapper, input_str=widget_html, name=name)


def convert_date(date_in):
    """Normalize *date_in* to a single-quoted 'YYYY-MM-DD' string.

    Accepts a date-like object (used as-is), a number (interpreted as a
    delta in days from today), or anything else (falls back to today).
    """
    resolved = utils.today()
    if hasattr(date_in, 'strftime'):
        # Date object
        resolved = date_in
    elif hasattr(date_in, 'real'):
        # number object assume delta days
        resolved = utils.today() + datetime.timedelta(days=date_in)
    return "'{}'".format(resolved.strftime('%Y-%m-%d'))
apache-2.0
kmod/icbd
stdlib/python2.5/idlelib/Percolator.py
69
2600
"""Percolator: chain Delegator filters in front of a Tk Text widget.

Insert/delete calls on the Text widget are redirected (via
WidgetRedirector) through a singly linked chain of Delegator filters:
``self.top`` is the most recently inserted filter, ``self.bottom`` is the
terminal Delegator that performs the real widget operation.
"""
from WidgetRedirector import WidgetRedirector
from Delegator import Delegator

class Percolator:

    def __init__(self, text):
        # XXX would be nice to inherit from Delegator
        self.text = text
        self.redir = WidgetRedirector(text)
        # Initially the chain is a single terminal Delegator; its insert
        # and delete are the *original* widget operations captured by the
        # redirector, while the widget's own calls now enter at self.top.
        self.top = self.bottom = Delegator(text)
        self.bottom.insert = self.redir.register("insert", self.insert)
        self.bottom.delete = self.redir.register("delete", self.delete)
        self.filters = []

    def close(self):
        """Unhook every filter and the redirector, dropping references."""
        while self.top is not self.bottom:
            self.removefilter(self.top)
        self.top = None
        self.bottom.setdelegate(None); self.bottom = None
        self.redir.close(); self.redir = None
        self.text = None

    def insert(self, index, chars, tags=None):
        # Could go away if inheriting from Delegator
        self.top.insert(index, chars, tags)

    def delete(self, index1, index2=None):
        # Could go away if inheriting from Delegator
        self.top.delete(index1, index2)

    def insertfilter(self, filter):
        # Perhaps rename to pushfilter()?
        # New filters are pushed onto the top of the chain.
        assert isinstance(filter, Delegator)
        assert filter.delegate is None
        filter.setdelegate(self.top)
        self.top = filter

    def removefilter(self, filter):
        # XXX Perhaps should only support popfilter()?
        assert isinstance(filter, Delegator)
        assert filter.delegate is not None
        f = self.top
        if f is filter:
            # Removing the head: just advance top past it.
            self.top = filter.delegate
            filter.setdelegate(None)
        else:
            # Walk down to the filter's predecessor, invalidating cached
            # delegated-method lookups along the way, then unlink it.
            while f.delegate is not filter:
                assert f is not self.bottom
                f.resetcache()
                f = f.delegate
            f.setdelegate(filter.delegate)
            filter.setdelegate(None)

def main():
    """Interactive smoke test: trace insert/delete through two filters."""
    class Tracer(Delegator):
        def __init__(self, name):
            self.name = name
            Delegator.__init__(self, None)
        def insert(self, *args):
            print self.name, ": insert", args
            self.delegate.insert(*args)
        def delete(self, *args):
            print self.name, ": delete", args
            self.delegate.delete(*args)
    root = Tk()
    root.wm_protocol("WM_DELETE_WINDOW", root.quit)
    text = Text()
    text.pack()
    text.focus_set()
    p = Percolator(text)
    t1 = Tracer("t1")
    t2 = Tracer("t2")
    p.insertfilter(t1)
    p.insertfilter(t2)
    root.mainloop()
    p.removefilter(t2)
    root.mainloop()
    p.insertfilter(t2)
    p.removefilter(t1)
    root.mainloop()

if __name__ == "__main__":
    from Tkinter import *
    main()
mit
dgarros/ansible
lib/ansible/modules/cloud/vmware/vca_fw.py
66
8429
#!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: vca_fw
short_description: add remove firewall rules in a gateway in a vca
description:
  - Adds or removes firewall rules from a gateway in a vca environment
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
    fw_rules:
        description:
          - A list of firewall rules to be added to the gateway, Please see examples on valid entries
        required: True
        default: false
extends_documentation_fragment: vca.documentation
'''

EXAMPLES = '''
#Add a set of firewall rules
- hosts: localhost
  connection: local
  tasks:
   - vca_fw:
       instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
       vdc_name: 'benz_ansible'
       state: 'absent'
       fw_rules:
         - description: "ben testing"
           source_ip: "Any"
           dest_ip: 192.0.2.23
         - description: "ben testing 2"
           source_ip: 192.0.2.50
           source_port: "Any"
           dest_port: "22"
           dest_ip: 192.0.2.101
           is_enable: "true"
           enable_logging: "false"
           protocol: "Tcp"
           policy: "allow"
'''

try:
    from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType
    from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
except ImportError:
    # normally set a flag here but it will be caught when testing for
    # the existence of pyvcloud (see module_utils/vca.py).  This just
    # protects against generating an exception at runtime
    pass

VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any']
VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description',
                   'dest_ip', 'dest_port', 'source_ip', 'source_port',
                   'protocol']


def protocol_to_tuple(protocol):
    """Return the (Tcp, Udp, Icmp, Other, Any) flags of a ProtocolsType."""
    return (protocol.get_Tcp(),
            protocol.get_Udp(),
            protocol.get_Icmp(),
            protocol.get_Other(),
            protocol.get_Any())


def protocol_to_string(protocol):
    """Map a ProtocolsType object to the name of the flag that is set."""
    protocol = protocol_to_tuple(protocol)
    if protocol[0] is True:
        return 'Tcp'
    elif protocol[1] is True:
        return 'Udp'
    elif protocol[2] is True:
        return 'Icmp'
    elif protocol[3] is True:
        return 'Other'
    elif protocol[4] is True:
        return 'Any'


def protocol_to_type(protocol):
    """Build a ProtocolsType with the given protocol flag enabled.

    Raises VcaError if the protocol name is not a settable attribute.
    """
    try:
        protocols = ProtocolsType()
        setattr(protocols, protocol, True)
        return protocols
    except AttributeError:
        raise VcaError("The value in protocol is not valid")


def validate_fw_rules(fw_rules):
    """Normalize user-supplied rule dicts in place and return them.

    Fills in defaults for every optional key and lower-cases the string
    fields so they compare cleanly against ``fw_rules_to_dict()`` output.
    Raises VcaError on unknown rule keys.
    """
    for rule in fw_rules:
        for k in rule.keys():
            if k not in VALID_RULE_KEYS:
                raise VcaError("%s is not a valid key in fw rules, please "
                               "check above.." % k,
                               valid_keys=VALID_RULE_KEYS)

        rule['dest_port'] = str(rule.get('dest_port', 'Any')).lower()
        rule['dest_ip'] = rule.get('dest_ip', 'Any').lower()
        rule['source_port'] = str(rule.get('source_port', 'Any')).lower()
        rule['source_ip'] = rule.get('source_ip', 'Any').lower()
        rule['protocol'] = rule.get('protocol', 'Any').lower()
        rule['policy'] = rule.get('policy', 'allow').lower()
        rule['is_enable'] = rule.get('is_enable', True)
        rule['enable_logging'] = rule.get('enable_logging', False)
        rule['description'] = rule.get('description', 'rule added by Ansible')

    return fw_rules


def fw_rules_to_dict(rules):
    """Convert FirewallRuleType objects to normalized comparison dicts."""
    fw_rules = list()
    for rule in rules:
        fw_rules.append(
            dict(
                dest_port=rule.get_DestinationPortRange().lower(),
                # FIX: the original called .lower() twice here; the second
                # call was a no-op.
                dest_ip=rule.get_DestinationIp().lower(),
                source_port=rule.get_SourcePortRange().lower(),
                source_ip=rule.get_SourceIp().lower(),
                protocol=protocol_to_string(rule.get_Protocols()).lower(),
                policy=rule.get_Policy().lower(),
                is_enable=rule.get_IsEnabled(),
                enable_logging=rule.get_EnableLogging(),
                description=rule.get_Description()
            )
        )
    return fw_rules


def create_fw_rule(is_enable, description, policy, protocol, dest_port,
                   dest_ip, source_port, source_ip, enable_logging):
    """Build a pyvcloud FirewallRuleType from normalized rule fields.

    NOTE(review): ``protocol`` arrives lower-cased from
    validate_fw_rules(); confirm ProtocolsType accepts lower-case
    attribute names in setattr, otherwise it should be capitalized here.
    """
    return FirewallRuleType(IsEnabled=is_enable,
                            Description=description,
                            Policy=policy,
                            Protocols=protocol_to_type(protocol),
                            DestinationPortRange=dest_port,
                            DestinationIp=dest_ip,
                            SourcePortRange=source_port,
                            SourceIp=source_ip,
                            EnableLogging=enable_logging)


def main():
    """Reconcile the gateway's firewall rules with the desired rule list."""
    argument_spec = vca_argument_spec()
    argument_spec.update(
        dict(
            fw_rules=dict(required=True, type='list'),
            gateway_name=dict(default='gateway'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(argument_spec, supports_check_mode=True)

    fw_rules = module.params.get('fw_rules')
    gateway_name = module.params.get('gateway_name')
    vdc_name = module.params['vdc_name']

    vca = vca_login(module)

    gateway = vca.get_gateway(vdc_name, gateway_name)
    if not gateway:
        module.fail_json(msg="Not able to find the gateway %s, please check "
                             "the gateway_name param" % gateway_name)

    fwservice = gateway._getFirewallService()

    rules = gateway.get_fw_rules()
    current_rules = fw_rules_to_dict(rules)

    try:
        desired_rules = validate_fw_rules(fw_rules)
    except VcaError as e:
        module.fail_json(msg=e.message)

    result = dict(changed=False)
    result['current_rules'] = current_rules
    result['desired_rules'] = desired_rules

    updates = list()
    additions = list()
    deletions = list()

    # Position-wise diff: rules at the same index are compared; indexes
    # beyond the current config are additions.
    for (index, rule) in enumerate(desired_rules):
        try:
            if rule != current_rules[index]:
                updates.append((index, rule))
        except IndexError:
            additions.append(rule)

    # FIX: the original computed a boolean (len > len), compared it to 0,
    # and then sliced with the undefined name `eos`, raising NameError
    # whenever trailing rules had to be deleted.  Slice off every current
    # rule past the end of the desired list instead.
    if len(current_rules) > len(desired_rules):
        deletions.extend(current_rules[len(desired_rules):])

    for rule in additions:
        if not module.check_mode:
            rule['protocol'] = rule['protocol'].capitalize()
            gateway.add_fw_rule(**rule)
        result['changed'] = True

    for index, rule in updates:
        if not module.check_mode:
            rule = create_fw_rule(**rule)
            fwservice.replace_FirewallRule_at(index, rule)
        result['changed'] = True

    keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
    for rule in deletions:
        if not module.check_mode:
            kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
            # FIX: deletion rules come from fw_rules_to_dict(), where the
            # protocol is already a lower-case *string*; the original passed
            # it to protocol_to_string(), which expects a ProtocolsType
            # object and would raise AttributeError.  Re-capitalize the
            # string for the API call instead (mirrors the additions loop).
            kwargs['protocol'] = kwargs['protocol'].capitalize()
            gateway.delete_fw_rule(**kwargs)
        result['changed'] = True

    if not module.check_mode and result['changed'] is True:
        task = gateway.save_services_configuration()
        if task:
            vca.block_until_completed(task)

    # FIX: the original used chained assignments of the form
    # "result[...] = count = len(...)", leaving a stray `count` local.
    result['rules_updated'] = len(updates)
    result['rules_added'] = len(additions)
    result['rules_deleted'] = len(deletions)

    return module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *

if __name__ == '__main__':
    main()
gpl-3.0
Work4Labs/lettuce
tests/integration/lib/Django-1.3/django/contrib/gis/gdal/geomtype.py
404
2967
from django.contrib.gis.gdal.error import OGRException


#### OGRGeomType ####
class OGRGeomType(object):
    "Encapsulates an OGR geometry type, accepting ints, strings or instances."

    # Bit OR-ed onto the base codes for the 2.5-dimensional geometry types.
    wkb25bit = -2147483648

    # Acceptable OGRwkbGeometryType codes mapped to their string names.
    _types = {
        0: 'Unknown',
        1: 'Point',
        2: 'LineString',
        3: 'Polygon',
        4: 'MultiPoint',
        5: 'MultiLineString',
        6: 'MultiPolygon',
        7: 'GeometryCollection',
        100: 'None',
        101: 'LinearRing',
        1 + wkb25bit: 'Point25D',
        2 + wkb25bit: 'LineString25D',
        3 + wkb25bit: 'Polygon25D',
        4 + wkb25bit: 'MultiPoint25D',
        5 + wkb25bit: 'MultiLineString25D',
        6 + wkb25bit: 'MultiPolygon25D',
        7 + wkb25bit: 'GeometryCollection25D',
    }
    # Reverse lookup table, keyed by the lower-cased type name.
    _str_types = dict([(v.lower(), k) for k, v in _types.items()])

    def __init__(self, type_input):
        "Resolve the OGR type code from an instance, name, or integer."
        if isinstance(type_input, OGRGeomType):
            num = type_input.num
        elif isinstance(type_input, basestring):
            # Accept 'geometry' as an alias for the 'unknown' type.
            lowered = type_input.lower()
            if lowered == 'geometry':
                lowered = 'unknown'
            num = self._str_types.get(lowered, None)
            if num is None:
                raise OGRException('Invalid OGR String Type "%s"' % lowered)
        elif isinstance(type_input, int):
            if type_input not in self._types:
                raise OGRException('Invalid OGR Integer Type: %d' % type_input)
            num = type_input
        else:
            raise TypeError('Invalid OGR input type given.')
        # Setting the OGR geometry type number.
        self.num = num

    def __str__(self):
        "The string form is simply the geometry type name."
        return self.name

    def __eq__(self, other):
        """
        Equivalence test against another OGRGeomType, the short-hand
        string, or the integer code.
        """
        if isinstance(other, OGRGeomType):
            return self.num == other.num
        if isinstance(other, basestring):
            return self.name.lower() == other.lower()
        if isinstance(other, int):
            return self.num == other
        return False

    def __ne__(self, other):
        return not (self == other)

    @property
    def name(self):
        "Returns a short-hand string form of the OGR Geometry type."
        return self._types[self.num]

    @property
    def django(self):
        "Returns the Django GeometryField for this OGR Type."
        base = self.name.replace('25D', '')
        if base in ('LinearRing', 'None'):
            return None
        if base == 'Unknown':
            base = 'Geometry'
        return base + 'Field'
gpl-3.0
dhruvsrivastava/OJ
flask/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py
475
9162
""" lockfile.py - Platform-independent advisory file locks. Requires Python 2.5 unless you apply 2.4.diff Locking is done on a per-thread basis instead of a per-process basis. Usage: >>> lock = LockFile('somefile') >>> try: ... lock.acquire() ... except AlreadyLocked: ... print 'somefile', 'is locked already.' ... except LockFailed: ... print 'somefile', 'can\\'t be locked.' ... else: ... print 'got lock' got lock >>> print lock.is_locked() True >>> lock.release() >>> lock = LockFile('somefile') >>> print lock.is_locked() False >>> with lock: ... print lock.is_locked() True >>> print lock.is_locked() False >>> lock = LockFile('somefile') >>> # It is okay to lock twice from the same thread... >>> with lock: ... lock.acquire() ... >>> # Though no counter is kept, so you can't unlock multiple times... >>> print lock.is_locked() False Exceptions: Error - base class for other exceptions LockError - base class for all locking exceptions AlreadyLocked - Another thread or process already holds the lock LockFailed - Lock failed for some other reason UnlockError - base class for all unlocking exceptions AlreadyUnlocked - File was not locked. NotMyLock - File was locked but not by the current thread/process """ from __future__ import absolute_import import sys import socket import os import threading import time import urllib import warnings import functools # Work with PEP8 and non-PEP8 versions of threading module. if not hasattr(threading, "current_thread"): threading.current_thread = threading.currentThread if not hasattr(threading.Thread, "get_name"): threading.Thread.get_name = threading.Thread.getName __all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked', 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock', 'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile', 'LockBase', 'locked'] class Error(Exception): """ Base class for other exceptions. >>> try: ... raise Error ... except Exception: ... 
pass """ pass class LockError(Error): """ Base class for error arising from attempts to acquire the lock. >>> try: ... raise LockError ... except Error: ... pass """ pass class LockTimeout(LockError): """Raised when lock creation fails within a user-defined period of time. >>> try: ... raise LockTimeout ... except LockError: ... pass """ pass class AlreadyLocked(LockError): """Some other thread/process is locking the file. >>> try: ... raise AlreadyLocked ... except LockError: ... pass """ pass class LockFailed(LockError): """Lock file creation failed for some other reason. >>> try: ... raise LockFailed ... except LockError: ... pass """ pass class UnlockError(Error): """ Base class for errors arising from attempts to release the lock. >>> try: ... raise UnlockError ... except Error: ... pass """ pass class NotLocked(UnlockError): """Raised when an attempt is made to unlock an unlocked file. >>> try: ... raise NotLocked ... except UnlockError: ... pass """ pass class NotMyLock(UnlockError): """Raised when an attempt is made to unlock a file someone else locked. >>> try: ... raise NotMyLock ... except UnlockError: ... pass """ pass class LockBase: """Base class for platform-specific lock classes.""" def __init__(self, path, threaded=True, timeout=None): """ >>> lock = LockBase('somefile') >>> lock = LockBase('somefile', threaded=False) """ self.path = path self.lock_file = os.path.abspath(path) + ".lock" self.hostname = socket.gethostname() self.pid = os.getpid() if threaded: t = threading.current_thread() # Thread objects in Python 2.4 and earlier do not have ident # attrs. Worm around that. 
ident = getattr(t, "ident", hash(t)) self.tname = "-%x" % (ident & 0xffffffff) else: self.tname = "" dirname = os.path.dirname(self.lock_file) # unique name is mostly about the current process, but must # also contain the path -- otherwise, two adjacent locked # files conflict (one file gets locked, creating lock-file and # unique file, the other one gets locked, creating lock-file # and overwriting the already existing lock-file, then one # gets unlocked, deleting both lock-file and unique file, # finally the last lock errors out upon releasing. self.unique_name = os.path.join(dirname, "%s%s.%s%s" % (self.hostname, self.tname, self.pid, hash(self.path))) self.timeout = timeout def acquire(self, timeout=None): """ Acquire the lock. * If timeout is omitted (or None), wait forever trying to lock the file. * If timeout > 0, try to acquire the lock for that many seconds. If the lock period expires and the file is still locked, raise LockTimeout. * If timeout <= 0, raise AlreadyLocked immediately if the file is already locked. """ raise NotImplemented("implement in subclass") def release(self): """ Release the lock. If the file is not locked, raise NotLocked. """ raise NotImplemented("implement in subclass") def is_locked(self): """ Tell whether or not the file is locked. """ raise NotImplemented("implement in subclass") def i_am_locking(self): """ Return True if this object is locking the file. """ raise NotImplemented("implement in subclass") def break_lock(self): """ Remove a lock. Useful if a locking thread failed to unlock. """ raise NotImplemented("implement in subclass") def __enter__(self): """ Context manager support. """ self.acquire() return self def __exit__(self, *_exc): """ Context manager support. 
""" self.release() def __repr__(self): return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name, self.path) def _fl_helper(cls, mod, *args, **kwds): warnings.warn("Import from %s module instead of lockfile package" % mod, DeprecationWarning, stacklevel=2) # This is a bit funky, but it's only for awhile. The way the unit tests # are constructed this function winds up as an unbound method, so it # actually takes three args, not two. We want to toss out self. if not isinstance(args[0], str): # We are testing, avoid the first arg args = args[1:] if len(args) == 1 and not kwds: kwds["threaded"] = True return cls(*args, **kwds) def LinkFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import LinkLockFile from the lockfile.linklockfile module. """ from . import linklockfile return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile", *args, **kwds) def MkdirFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import MkdirLockFile from the lockfile.mkdirlockfile module. """ from . import mkdirlockfile return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", *args, **kwds) def SQLiteFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import SQLiteLockFile from the lockfile.mkdirlockfile module. """ from . import sqlitelockfile return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", *args, **kwds) def locked(path, timeout=None): """Decorator which enables locks for decorated function. Arguments: - path: path for lockfile. - timeout (optional): Timeout for acquiring lock. Usage: @locked('/var/run/myname', timeout=0) def myname(...): ... 
""" def decor(func): @functools.wraps(func) def wrapper(*args, **kwargs): lock = FileLock(path, timeout=timeout) lock.acquire() try: return func(*args, **kwargs) finally: lock.release() return wrapper return decor if hasattr(os, "link"): from . import linklockfile as _llf LockFile = _llf.LinkLockFile else: from . import mkdirlockfile as _mlf LockFile = _mlf.MkdirLockFile FileLock = LockFile
bsd-3-clause
vedujoshi/tempest
tempest/hacking/checks.py
1
10232
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Local hacking (pep8/flake8 plugin) checks for the tempest code base.
# Each check follows the pep8 plugin convention: it is called once per
# physical or logical line and returns or yields (offset, "Txxx: message")
# tuples for violations; returning None means "no problem on this line".

import os
import re

import pep8


PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift',
                  'neutron', 'ironic', 'heat', 'sahara']

PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
# A "def" at exactly one indentation level, i.e. a method in a class body.
# NOTE(review): the whitespace inside this pattern was lost in transit;
# reconstructed as four spaces -- confirm against upstream tempest.
METHOD = re.compile(r"^    def .+")
METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+")
METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+")
CLASS = re.compile(r"^class .+")


def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
    """Check for client imports from tempest/api & tempest/scenario tests

    T102: Cannot import OpenStack python clients
    """
    if "tempest/api" in filename or "tempest/scenario" in filename:
        res = PYTHON_CLIENT_RE.match(physical_line)
        if res:
            return (physical_line.find(res.group(1)),
                    ("T102: python clients import not allowed"
                     " in tempest/api/* or tempest/scenario/* tests"))


def scenario_tests_need_service_tags(physical_line, filename,
                                     previous_logical):
    """Check that scenario tests have service tags

    T104: Scenario tests require a services decorator
    """
    if 'tempest/scenario/' in filename and '/test_' in filename:
        if TEST_DEFINITION.match(physical_line):
            # The decorator must be on the line immediately preceding the
            # test definition, which pep8 exposes as previous_logical.
            if not SCENARIO_DECORATOR.match(previous_logical):
                return (physical_line.find('def'),
                        "T104: Scenario tests require a service decorator")


def no_setup_teardown_class_for_tests(physical_line, filename):
    """Check that tests do not define setUpClass/tearDownClass

    T105
    """
    # Lines explicitly marked "# noqa" are exempt.
    if pep8.noqa(physical_line):
        return

    # The base test class and tempest.lib are allowed to use them.
    if 'tempest/test.py' in filename or 'tempest/lib/' in filename:
        return

    if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
        return (physical_line.find('def'),
                "T105: (setUp|tearDown)Class can not be used in tests")


def no_vi_headers(physical_line, line_number, lines):
    """Check for vi editor configuration in source files.

    By default vi modelines can only appear in the first or
    last 5 lines of a source file.

    T106
    """
    # NOTE(gilliard): line_number is 1-indexed
    if line_number <= 5 or line_number > len(lines) - 5:
        if VI_HEADER_RE.match(physical_line):
            return 0, "T106: Don't put vi configuration in source files"


def service_tags_not_in_module_path(physical_line, filename):
    """Check that a service tag isn't in the module path

    A service tag should only be added if the service name isn't already in
    the module path.

    T107
    """
    # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
    # created for services like heat which would cause false negatives for
    # those tests, so just exclude the scenario tests.
    if 'tempest/scenario' not in filename:
        matches = SCENARIO_DECORATOR.match(physical_line)
        if matches:
            services = matches.group(1).split(',')
            for service in services:
                service_name = service.strip().strip("'")
                modulepath = os.path.split(filename)[0]
                if service_name in modulepath:
                    return (physical_line.find(service_name),
                            "T107: service tag should not be in path")


def no_hyphen_at_end_of_rand_name(logical_line, filename):
    """Check no hyphen at the end of rand_name() argument

    T108
    """
    # NOTE(review): `filename` is unused here; presumably kept so the
    # signature matches the other checks -- confirm before removing.
    msg = "T108: hyphen should not be specified at the end of rand_name()"
    if RAND_NAME_HYPHEN_RE.match(logical_line):
        return 0, msg


def no_mutable_default_args(logical_line):
    """Check that mutable object isn't used as default argument

    N322: Method's default argument shouldn't be mutable
    """
    msg = "N322: Method's default argument shouldn't be mutable!"
    if mutable_default_args.match(logical_line):
        yield (0, msg)


def no_testtools_skip_decorator(logical_line):
    """Check that methods do not have the testtools.skip decorator

    T109
    """
    if TESTTOOLS_SKIP_DECORATOR.match(logical_line):
        yield (0, "T109: Cannot use testtools.skip decorator; instead use "
               "decorators.skip_because from tempest.lib")


def _common_service_clients_check(logical_line, physical_line, filename,
                                  ignored_list_file=None):
    # Shared gate for the T110/T111 service-client checks: returns True only
    # for non-ignored method definitions inside tempest service client files.
    if not re.match('tempest/(lib/)?services/.*', filename):
        return False

    if ignored_list_file is not None:
        ignored_list = []
        with open('tempest/hacking/' + ignored_list_file) as f:
            for line in f:
                ignored_list.append(line.strip())

        if filename in ignored_list:
            return False

    if not METHOD.match(physical_line):
        return False

    if pep8.noqa(physical_line):
        return False

    return True


def get_resources_on_service_clients(logical_line, physical_line, filename,
                                     line_number, lines):
    """Check that service client names of GET should be consistent

    T110
    """
    if not _common_service_clients_check(logical_line, physical_line,
                                         filename, 'ignored_list_T110.txt'):
        return

    # Scan forward through the method body looking for a GET-style call.
    for line in lines[line_number:]:
        if METHOD.match(line) or CLASS.match(line):
            # the end of a method
            return

        if 'self.get(' not in line and ('self.show_resource(' not in line and
                                        'self.list_resources(' not in line):
            continue

        if METHOD_GET_RESOURCE.match(logical_line):
            return

        msg = ("T110: [GET /resources] methods should be list_<resource name>s"
               " or show_<resource name>")
        yield (0, msg)


def delete_resources_on_service_clients(logical_line, physical_line, filename,
                                        line_number, lines):
    """Check that service client names of DELETE should be consistent

    T111
    """
    if not _common_service_clients_check(logical_line, physical_line,
                                         filename, 'ignored_list_T111.txt'):
        return

    for line in lines[line_number:]:
        if METHOD.match(line) or CLASS.match(line):
            # the end of a method
            return

        if ('self.delete(' not in line and
                'self.delete_resource(' not in line):
            continue

        if METHOD_DELETE_RESOURCE.match(logical_line):
            return

        msg = ("T111: [DELETE /resources/<id>] methods should be "
               "delete_<resource name>")
        yield (0, msg)


def dont_import_local_tempest_into_lib(logical_line, filename):
    """Check that tempest.lib should not import local tempest code

    T112
    """
    if 'tempest/lib/' not in filename:
        return

    if not ('from tempest' in logical_line or
            'import tempest' in logical_line):
        return

    # Imports of tempest.lib itself are fine.
    if ('from tempest.lib' in logical_line or
            'import tempest.lib' in logical_line):
        return

    msg = ("T112: tempest.lib should not import local tempest code to avoid "
           "circular dependency")
    yield (0, msg)


def use_rand_uuid_instead_of_uuid4(logical_line, filename):
    """Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()

    T113
    """
    if 'tempest/lib/' in filename:
        return

    if 'uuid.uuid4()' not in logical_line:
        return

    msg = ("T113: Tests should use data_utils.rand_uuid()/rand_uuid_hex() "
           "instead of uuid.uuid4()/uuid.uuid4().hex")
    yield (0, msg)


def dont_use_config_in_tempest_lib(logical_line, filename):
    """Check that tempest.lib doesn't use tempest config

    T114
    """
    if 'tempest/lib/' not in filename:
        return

    if ('tempest.config' in logical_line or
            'from tempest import config' in logical_line or
            'oslo_config' in logical_line):
        msg = ('T114: tempest.lib can not have any dependency on tempest '
               'config.')
        yield(0, msg)


def dont_put_admin_tests_on_nonadmin_path(logical_line, physical_line,
                                          filename):
    """Check admin tests should exist under admin path

    T115
    """
    if 'tempest/api/' not in filename:
        return

    if pep8.noqa(physical_line):
        return

    if not re.match('class .*Test.*\(.*Admin.*\):', logical_line):
        return

    if not re.match('.\/tempest\/api\/.*\/admin\/.*', filename):
        msg = 'T115: All admin tests should exist under admin path.'
        yield(0, msg)


def factory(register):
    # Entry point used by the hacking framework to register every check
    # defined in this module.
    register(import_no_clients_in_api_and_scenario_tests)
    register(scenario_tests_need_service_tags)
    register(no_setup_teardown_class_for_tests)
    register(no_vi_headers)
    register(service_tags_not_in_module_path)
    register(no_hyphen_at_end_of_rand_name)
    register(no_mutable_default_args)
    register(no_testtools_skip_decorator)
    register(get_resources_on_service_clients)
    register(delete_resources_on_service_clients)
    register(dont_import_local_tempest_into_lib)
    register(dont_use_config_in_tempest_lib)
    register(use_rand_uuid_instead_of_uuid4)
    register(dont_put_admin_tests_on_nonadmin_path)
apache-2.0
DeltaEpsilon-HackFMI2/FMICalendar-REST
venv/lib/python2.7/site-packages/django/utils/autoreload.py
110
5262
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright notice,
#       this list of conditions and the following disclaimer in the documentation
#       and/or other materials provided with the distribution.
#     * Neither the name of the CherryPy Team nor the names of its contributors
#       may be used to endorse or promote products derived from this software
#       without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os, sys, time, signal

try:
    from django.utils.six.moves import _thread as thread
except ImportError:
    from django.utils.six.moves import _dummy_thread as thread

# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
    import threading
except ImportError:
    pass

# termios is POSIX-only; on platforms without it, ensure_echo_on is a no-op.
try:
    import termios
except ImportError:
    termios = None

RUN_RELOADER = True

# filename -> last seen mtime, shared by all calls to code_changed().
_mtimes = {}
_win = (sys.platform == "win32")


def code_changed():
    """Return True if the source of any imported module changed on disk.

    Scans every module in sys.modules, mapping .pyc/.pyo and Jython
    $py.class files back to their .py source. The first call only records
    mtimes and returns False; a later mtime mismatch resets the cache and
    returns True.
    """
    global _mtimes, _win
    filenames = [getattr(m, "__file__", None) for m in sys.modules.values()]
    for filename in filter(None, filenames):
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            filename = filename[:-1]
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"
        if not os.path.exists(filename):
            continue  # File might be in an egg, so it can't be reloaded.
        stat = os.stat(filename)
        mtime = stat.st_mtime
        if _win:
            # NOTE(review): on win32 the creation time is subtracted from
            # the mtime before comparison -- presumably to compensate for
            # files being replaced rather than updated; confirm rationale.
            mtime -= stat.st_ctime
        if filename not in _mtimes:
            _mtimes[filename] = mtime
            continue
        if mtime != _mtimes[filename]:
            # Any change invalidates the whole cache and triggers a reload.
            _mtimes = {}
            return True
    return False


def ensure_echo_on():
    """Re-enable terminal echo if a dying child process left it off."""
    if termios:
        fd = sys.stdin
        if fd.isatty():
            attr_list = termios.tcgetattr(fd)
            if not attr_list[3] & termios.ECHO:
                attr_list[3] |= termios.ECHO
                if hasattr(signal, 'SIGTTOU'):
                    # Ignore SIGTTOU while tweaking the tty from a
                    # background process, then restore the old handler.
                    old_handler = signal.signal(signal.SIGTTOU,
                                                signal.SIG_IGN)
                else:
                    old_handler = None
                termios.tcsetattr(fd, termios.TCSANOW, attr_list)
                if old_handler is not None:
                    signal.signal(signal.SIGTTOU, old_handler)


def reloader_thread():
    """Poll for source changes once a second; exit(3) requests a restart."""
    ensure_echo_on()
    while RUN_RELOADER:
        if code_changed():
            sys.exit(3)  # force reload
        time.sleep(1)


def restart_with_reloader():
    """Re-exec this script in a child process until it stops asking to reload.

    The child runs with RUN_MAIN=true in its environment; exit code 3 is
    the "source changed, restart me" signal from reloader_thread().
    """
    while True:
        args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
        if sys.platform == "win32":
            args = ['"%s"' % arg for arg in args]
        new_environ = os.environ.copy()
        new_environ["RUN_MAIN"] = 'true'
        exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
        if exit_code != 3:
            return exit_code


def python_reloader(main_func, args, kwargs):
    """CPython strategy: parent respawns children; each child watches itself.

    In the child (RUN_MAIN=true) run main_func on a background thread and
    poll for changes on the main thread. In the parent, keep respawning;
    a negative exit code means the child died from a signal, which is
    re-raised against this process so the death is faithful.
    """
    if os.environ.get("RUN_MAIN") == "true":
        thread.start_new_thread(main_func, args, kwargs)
        try:
            reloader_thread()
        except KeyboardInterrupt:
            pass
    else:
        try:
            exit_code = restart_with_reloader()
            if exit_code < 0:
                os.kill(os.getpid(), -exit_code)
            else:
                sys.exit(exit_code)
        except KeyboardInterrupt:
            pass


def jython_reloader(main_func, args, kwargs):
    """Jython strategy: raise SystemRestart in-process when sources change."""
    from _systemrestart import SystemRestart
    thread.start_new_thread(main_func, args)
    while True:
        if code_changed():
            raise SystemRestart
        time.sleep(1)


def main(main_func, args=None, kwargs=None):
    """Run main_func under the autoreloader appropriate for this platform."""
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    if sys.platform.startswith('java'):
        reloader = jython_reloader
    else:
        reloader = python_reloader
    reloader(main_func, args, kwargs)
mit
vismartltd/edx-platform
common/lib/xmodule/xmodule/error_module.py
30
6875
"""
Modules that get shown to the users when an error has occurred while
loading or rendering other modules
"""

import hashlib
import logging
import json
import sys

from lxml import etree

from xmodule.x_module import XModule, XModuleDescriptor
from xmodule.errortracker import exc_info_to_str
from xblock.fields import String, Scope, ScopeIds
from xblock.field_data import DictFieldData
from xmodule.modulestore import EdxJSONEncoder

log = logging.getLogger(__name__)

# NOTE: This is not the most beautiful design in the world, but there's no
# good way to tell if the module is being used in a staff context or not.
# Errors that get discovered at course load time are turned into
# ErrorDescriptor objects, and automatically hidden from students.
# Unfortunately, we can also have errors when loading modules mid-request,
# and then we need to decide what to show, and the logic for that belongs in
# the LMS (e.g. in get_module), so the error handler decides whether to
# create a staff or not-staff module.


class ErrorFields(object):
    """
    XBlock fields used by the ErrorModules
    """
    # contents: raw text (xml/json/repr) of the thing that failed to load
    contents = String(scope=Scope.content)
    # error_msg: human-readable description of what went wrong
    error_msg = String(scope=Scope.content)
    display_name = String(scope=Scope.settings)


class ErrorModule(ErrorFields, XModule):
    """
    Module that gets shown to staff when there has been an error while loading
    or rendering other modules
    """

    def get_html(self):
        '''Show an error to staff.
        TODO (vshnayder): proper style, divs, etc.
        '''
        # staff get to see all the details
        return self.system.render_template('module-error.html', {
            'staff_access': True,
            'data': self.contents,
            'error': self.error_msg,
        })


class NonStaffErrorModule(ErrorFields, XModule):
    """
    Module that gets shown to students when there has been an error while
    loading or rendering other modules
    """

    def get_html(self):
        '''Show an error to a student.
        TODO (vshnayder): proper style, divs, etc.
        '''
        # Same template as the staff view, but with the details blanked out.
        return self.system.render_template('module-error.html', {
            'staff_access': False,
            'data': "",
            'error': "",
        })


class ErrorDescriptor(ErrorFields, XModuleDescriptor):
    """
    Module that provides a raw editing view of broken xml.
    """
    module_class = ErrorModule

    def get_html(self):
        return u''

    @classmethod
    def _construct(cls, system, contents, error_msg, location):
        # Shared builder used by all the from_* constructors below.
        if error_msg is None:
            # this string is not marked for translation because we don't have
            # access to the user context, and this will only be seen by staff
            error_msg = 'Error not available'

        if location.category == 'error':
            location = location.replace(
                # Pick a unique url_name -- the sha1 hash of the contents.
                # NOTE: We could try to pull out the url_name of the errored
                # descriptor, but url_names aren't guaranteed to be unique
                # between descriptor types, and ErrorDescriptor can wrap any
                # type.  When the wrapped module is fixed, it will be written
                # out with the original url_name.
                name=hashlib.sha1(contents.encode('utf8')).hexdigest()
            )

        # real metadata stays in the content, but add a display name
        # NOTE(review): `unicode` here is Python 2-only.
        field_data = DictFieldData({
            'error_msg': unicode(error_msg),
            'contents': contents,
            'location': location,
            'category': 'error'
        })
        return system.construct_xblock_from_class(
            cls,
            # The error module doesn't use scoped data, and thus doesn't need
            # real scope keys
            ScopeIds(None, 'error', location, location),
            field_data,
        )

    def get_context(self):
        return {
            'module': self,
            'data': self.contents,
        }

    @classmethod
    def from_json(cls, json_data, system, location,
                  error_msg='Error not available'):
        # Wrap a block whose definition came from (possibly unserializable)
        # JSON; fall back to repr() if it cannot be dumped.
        try:
            json_string = json.dumps(json_data, skipkeys=False, indent=4,
                                     cls=EdxJSONEncoder)
        except:  # pylint: disable=bare-except
            json_string = repr(json_data)
        return cls._construct(
            system,
            json_string,
            error_msg,
            location=location
        )

    @classmethod
    def from_descriptor(cls, descriptor, error_msg=None):
        # Wrap an already-instantiated descriptor that failed mid-request.
        return cls._construct(
            descriptor.runtime,
            str(descriptor),
            error_msg,
            location=descriptor.location,
        )

    @classmethod
    def from_xml(cls, xml_data, system, id_generator,  # pylint: disable=arguments-differ
                 error_msg=None):
        '''Create an instance of this descriptor from the supplied data.

        Does not require that xml_data be parseable--just stores it and exports
        as-is if not.

        Takes an extra, optional, parameter--the error that caused an
        issue.  (should be a string, or convert usefully into one).
        '''
        try:
            # If this is already an error tag, don't want to re-wrap it.
            xml_obj = etree.fromstring(xml_data)
            if xml_obj.tag == 'error':
                xml_data = xml_obj.text
                error_node = xml_obj.find('error_msg')
                if error_node is not None:
                    error_msg = error_node.text
                else:
                    error_msg = None
        except etree.XMLSyntaxError:
            # Save the error to display later--overrides other problems
            error_msg = exc_info_to_str(sys.exc_info())

        return cls._construct(
            system, xml_data, error_msg,
            location=id_generator.create_definition('error'))

    def export_to_xml(self, resource_fs):
        '''
        If the definition data is invalid xml, export it wrapped in an "error"
        tag.  If it is valid, export without the wrapper.

        NOTE: There may still be problems with the valid xml--it could be
        missing required attributes, could have the wrong tags, refer to
        missing files, etc.  That would just get re-wrapped on import.
        '''
        try:
            xml = etree.fromstring(self.contents)
            return etree.tostring(xml, encoding='unicode')
        except etree.XMLSyntaxError:
            # still not valid.
            root = etree.Element('error')
            root.text = self.contents
            err_node = etree.SubElement(root, 'error_msg')
            err_node.text = self.error_msg
            return etree.tostring(root, encoding='unicode')


class NonStaffErrorDescriptor(ErrorDescriptor):
    """
    Module that provides non-staff error messages.
    """
    module_class = NonStaffErrorModule
agpl-3.0
CartoDB/mapnik
scons/scons-local-3.0.1/SCons/Options/PackageOption.py
5
2008
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "src/engine/SCons/Options/PackageOption.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"

__doc__ = """Place-holder for the old SCons.Options module hierarchy

This is for backwards compatibility.  The new equivalent is the Variables/
class hierarchy.

These will have deprecation warnings added (some day), and will then be
removed entirely (some day).
"""

import SCons.Variables
import SCons.Warnings

# Module-level flag so the deprecation warning is emitted at most once
# per process, no matter how many times PackageOption() is called.
warned = False


def PackageOption(*args, **kw):
    """Deprecated forwarder kept for backwards compatibility.

    Emits a one-time DeprecatedOptionsWarning on first use, then delegates
    every call unchanged to SCons.Variables.PackageVariable.
    """
    global warned
    if warned:
        # Already warned earlier in this process: just delegate.
        return SCons.Variables.PackageVariable(*args, **kw)
    msg = "The PackageOption() function is deprecated; use the PackageVariable() function instead."
    SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
    warned = True
    return SCons.Variables.PackageVariable(*args, **kw)

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
lgpl-2.1
HiroIshikawa/21playground
thumblelog/myproject/lib/python3.5/site-packages/pip/_vendor/progress/spinner.py
404
1341
# -*- coding: utf-8 -*-

# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

from __future__ import unicode_literals
from . import Infinite
from .helpers import WriteMixin


class Spinner(WriteMixin, Infinite):
    """Indeterminate progress indicator: cycles through `phases` frames."""
    message = ''
    phases = ('-', '\\', '|', '/')
    hide_cursor = True

    def update(self):
        # Draw the frame corresponding to the current tick, wrapping
        # around the phase list forever.
        frame = self.phases[self.index % len(self.phases)]
        self.write(frame)


class PieSpinner(Spinner):
    """Spinner variant drawn with quarter-pie glyphs."""
    phases = ['◷', '◶', '◵', '◴']


class MoonSpinner(Spinner):
    """Spinner variant drawn with moon-phase glyphs."""
    phases = ['◑', '◒', '◐', '◓']


class LineSpinner(Spinner):
    """Spinner variant that sweeps a horizontal bar down and back up."""
    phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻']
mit
TheWylieStCoyote/gnuradio
gr-blocks/python/blocks/qa_peak_detector.py
2
2112
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#


from gnuradio import gr, gr_unittest, blocks


class test_peak_detector(gr_unittest.TestCase):
    """QA for peak_detector_{fb,ib,sb}: flag the single peak of a ramp.

    The three original test cases were byte-identical except for the
    source/detector block types, so the shared body lives in one helper.
    """

    # Rising-then-falling ramp; the unique peak sits at index 10.
    _RAMP = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    # Expected detector output: a single '1' flag at the peak position.
    _EXPECTED = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def _check_peak_detector(self, make_source, make_detector):
        """Run source -> detector -> sink on the ramp and verify the flags.

        make_source: vector source factory taking (data, repeat)
        make_detector: no-arg peak detector factory
        """
        src = make_source(self._RAMP, False)
        regen = make_detector()
        dst = blocks.vector_sink_b()

        self.tb.connect(src, regen)
        self.tb.connect(regen, dst)
        self.tb.run()

        self.assertEqual(self._EXPECTED, dst.data())

    def test_01(self):
        # float input -> byte flags
        self._check_peak_detector(blocks.vector_source_f,
                                  blocks.peak_detector_fb)

    def test_02(self):
        # int input -> byte flags
        self._check_peak_detector(blocks.vector_source_i,
                                  blocks.peak_detector_ib)

    def test_03(self):
        # short input -> byte flags
        self._check_peak_detector(blocks.vector_source_s,
                                  blocks.peak_detector_sb)


if __name__ == '__main__':
    gr_unittest.run(test_peak_detector, "test_peak_detector.xml")
gpl-3.0
Zhongqilong/mykbengineer
kbe/src/lib/python/Lib/mimetypes.py
83
20735
"""Guess the MIME type of a file. This module defines two useful functions: guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. guess_extension(type, strict=True) -- guess the extension for a given MIME type. It also contains the following, for tuning the behavior: Data: knownfiles -- list of files to parse inited -- flag set when init() has been called suffix_map -- dictionary mapping suffixes to suffixes encodings_map -- dictionary mapping suffixes to encodings types_map -- dictionary mapping suffixes to types Functions: init([files]) -- parse a list of files, default knownfiles (on Windows, the default values are taken from the registry) read_mime_types(file) -- parse one file, return a dictionary or None """ import os import sys import posixpath import urllib.parse try: import winreg as _winreg except ImportError: _winreg = None __all__ = [ "guess_type","guess_extension","guess_all_extensions", "add_type","read_mime_types","init" ] knownfiles = [ "/etc/mime.types", "/etc/httpd/mime.types", # Mac OS X "/etc/httpd/conf/mime.types", # Apache "/etc/apache/mime.types", # Apache 1 "/etc/apache2/mime.types", # Apache 2 "/usr/local/etc/httpd/conf/mime.types", "/usr/local/lib/netscape/mime.types", "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2 "/usr/local/etc/mime.types", # Apache 1.3 ] inited = False _db = None class MimeTypes: """MIME-types datastore. This datastore can handle information from mime.types-style files and supports basic determination of MIME type from a filename or URL, and can guess a reasonable extension given a MIME type. 
""" def __init__(self, filenames=(), strict=True): if not inited: init() self.encodings_map = encodings_map.copy() self.suffix_map = suffix_map.copy() self.types_map = ({}, {}) # dict for (non-strict, strict) self.types_map_inv = ({}, {}) for (ext, type) in types_map.items(): self.add_type(type, ext, True) for (ext, type) in common_types.items(): self.add_type(type, ext, False) for name in filenames: self.read(name, strict) def add_type(self, type, ext, strict=True): """Add a mapping between a type and an extension. When the extension is already known, the new type will replace the old one. When the type is already known the extension will be added to the list of known extensions. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ self.types_map[strict][ext] = type exts = self.types_map_inv[strict].setdefault(type, []) if ext not in exts: exts.append(ext) def guess_type(self, url, strict=True): """Guess the type of a file based on its URL. Return value is a tuple (type, encoding) where type is None if the type can't be guessed (no or unknown suffix) or a string of the form type/subtype, usable for a MIME Content-type header; and encoding is None for no encoding or the name of the program used to encode (e.g. compress or gzip). The mappings are table driven. Encoding suffixes are case sensitive; type suffixes are first tried case sensitive, then case insensitive. The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped to '.tar.gz'. (This is table-driven too, using the dictionary suffix_map.) Optional `strict' argument when False adds a bunch of commonly found, but non-standard types. 
""" scheme, url = urllib.parse.splittype(url) if scheme == 'data': # syntax of data URLs: # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data # mediatype := [ type "/" subtype ] *( ";" parameter ) # data := *urlchar # parameter := attribute "=" value # type/subtype defaults to "text/plain" comma = url.find(',') if comma < 0: # bad data URL return None, None semi = url.find(';', 0, comma) if semi >= 0: type = url[:semi] else: type = url[:comma] if '=' in type or '/' not in type: type = 'text/plain' return type, None # never compressed, so encoding is None base, ext = posixpath.splitext(url) while ext in self.suffix_map: base, ext = posixpath.splitext(base + self.suffix_map[ext]) if ext in self.encodings_map: encoding = self.encodings_map[ext] base, ext = posixpath.splitext(base) else: encoding = None types_map = self.types_map[True] if ext in types_map: return types_map[ext], encoding elif ext.lower() in types_map: return types_map[ext.lower()], encoding elif strict: return None, encoding types_map = self.types_map[False] if ext in types_map: return types_map[ext], encoding elif ext.lower() in types_map: return types_map[ext.lower()], encoding else: return None, encoding def guess_all_extensions(self, type, strict=True): """Guess the extensions for a file based on its MIME type. Return value is a list of strings giving the possible filename extensions, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ type = type.lower() extensions = self.types_map_inv[True].get(type, []) if not strict: for ext in self.types_map_inv[False].get(type, []): if ext not in extensions: extensions.append(ext) return extensions def guess_extension(self, type, strict=True): """Guess the extension for a file based on its MIME type. 
Return value is a string giving a filename extension, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ extensions = self.guess_all_extensions(type, strict) if not extensions: return None return extensions[0] def read(self, filename, strict=True): """ Read a single mime.types-format file, specified by pathname. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ with open(filename, encoding='utf-8') as fp: self.readfp(fp, strict) def readfp(self, fp, strict=True): """ Read a single mime.types-format file. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ while 1: line = fp.readline() if not line: break words = line.split() for i in range(len(words)): if words[i][0] == '#': del words[i:] break if not words: continue type, suffixes = words[0], words[1:] for suff in suffixes: self.add_type(type, '.' + suff, strict) def read_windows_registry(self, strict=True): """ Load the MIME types database from Windows registry. If strict is true, information will be added to list of standard types, else to the list of non-standard types. 
""" # Windows only if not _winreg: return def enum_types(mimedb): i = 0 while True: try: ctype = _winreg.EnumKey(mimedb, i) except EnvironmentError: break else: yield ctype i += 1 with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr: for subkeyname in enum_types(hkcr): try: with _winreg.OpenKey(hkcr, subkeyname) as subkey: # Only check file extensions if not subkeyname.startswith("."): continue # raises EnvironmentError if no 'Content Type' value mimetype, datatype = _winreg.QueryValueEx( subkey, 'Content Type') if datatype != _winreg.REG_SZ: continue self.add_type(mimetype, subkeyname, strict) except EnvironmentError: continue def guess_type(url, strict=True): """Guess the type of a file based on its URL. Return value is a tuple (type, encoding) where type is None if the type can't be guessed (no or unknown suffix) or a string of the form type/subtype, usable for a MIME Content-type header; and encoding is None for no encoding or the name of the program used to encode (e.g. compress or gzip). The mappings are table driven. Encoding suffixes are case sensitive; type suffixes are first tried case sensitive, then case insensitive. The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped to ".tar.gz". (This is table-driven too, using the dictionary suffix_map). Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ if _db is None: init() return _db.guess_type(url, strict) def guess_all_extensions(type, strict=True): """Guess the extensions for a file based on its MIME type. Return value is a list of strings giving the possible filename extensions, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. 
""" if _db is None: init() return _db.guess_all_extensions(type, strict) def guess_extension(type, strict=True): """Guess the extension for a file based on its MIME type. Return value is a string giving a filename extension, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ if _db is None: init() return _db.guess_extension(type, strict) def add_type(type, ext, strict=True): """Add a mapping between a type and an extension. When the extension is already known, the new type will replace the old one. When the type is already known the extension will be added to the list of known extensions. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ if _db is None: init() return _db.add_type(type, ext, strict) def init(files=None): global suffix_map, types_map, encodings_map, common_types global inited, _db inited = True # so that MimeTypes.__init__() doesn't call us again db = MimeTypes() if files is None: if _winreg: db.read_windows_registry() files = knownfiles for file in files: if os.path.isfile(file): db.read(file) encodings_map = db.encodings_map suffix_map = db.suffix_map types_map = db.types_map[True] common_types = db.types_map[False] # Make the DB a global variable now that it is fully initialized _db = db def read_mime_types(file): try: f = open(file) except OSError: return None with f: db = MimeTypes() db.readfp(f, True) return db.types_map[True] def _default_mime_types(): global suffix_map global encodings_map global types_map global common_types suffix_map = { '.svgz': '.svg.gz', '.tgz': '.tar.gz', '.taz': '.tar.gz', '.tz': '.tar.gz', '.tbz2': '.tar.bz2', '.txz': '.tar.xz', } encodings_map = { '.gz': 'gzip', 
'.Z': 'compress', '.bz2': 'bzip2', '.xz': 'xz', } # Before adding new types, make sure they are either registered with IANA, # at http://www.iana.org/assignments/media-types # or extensions, i.e. using the x- prefix # If you add to these, please keep them sorted! types_map = { '.a' : 'application/octet-stream', '.ai' : 'application/postscript', '.aif' : 'audio/x-aiff', '.aifc' : 'audio/x-aiff', '.aiff' : 'audio/x-aiff', '.au' : 'audio/basic', '.avi' : 'video/x-msvideo', '.bat' : 'text/plain', '.bcpio' : 'application/x-bcpio', '.bin' : 'application/octet-stream', '.bmp' : 'image/x-ms-bmp', '.c' : 'text/plain', # Duplicates :( '.cdf' : 'application/x-cdf', '.cdf' : 'application/x-netcdf', '.cpio' : 'application/x-cpio', '.csh' : 'application/x-csh', '.css' : 'text/css', '.dll' : 'application/octet-stream', '.doc' : 'application/msword', '.dot' : 'application/msword', '.dvi' : 'application/x-dvi', '.eml' : 'message/rfc822', '.eps' : 'application/postscript', '.etx' : 'text/x-setext', '.exe' : 'application/octet-stream', '.gif' : 'image/gif', '.gtar' : 'application/x-gtar', '.h' : 'text/plain', '.hdf' : 'application/x-hdf', '.htm' : 'text/html', '.html' : 'text/html', '.ico' : 'image/vnd.microsoft.icon', '.ief' : 'image/ief', '.jpe' : 'image/jpeg', '.jpeg' : 'image/jpeg', '.jpg' : 'image/jpeg', '.js' : 'application/javascript', '.ksh' : 'text/plain', '.latex' : 'application/x-latex', '.m1v' : 'video/mpeg', '.m3u' : 'application/vnd.apple.mpegurl', '.m3u8' : 'application/vnd.apple.mpegurl', '.man' : 'application/x-troff-man', '.me' : 'application/x-troff-me', '.mht' : 'message/rfc822', '.mhtml' : 'message/rfc822', '.mif' : 'application/x-mif', '.mov' : 'video/quicktime', '.movie' : 'video/x-sgi-movie', '.mp2' : 'audio/mpeg', '.mp3' : 'audio/mpeg', '.mp4' : 'video/mp4', '.mpa' : 'video/mpeg', '.mpe' : 'video/mpeg', '.mpeg' : 'video/mpeg', '.mpg' : 'video/mpeg', '.ms' : 'application/x-troff-ms', '.nc' : 'application/x-netcdf', '.nws' : 'message/rfc822', '.o' : 
'application/octet-stream', '.obj' : 'application/octet-stream', '.oda' : 'application/oda', '.p12' : 'application/x-pkcs12', '.p7c' : 'application/pkcs7-mime', '.pbm' : 'image/x-portable-bitmap', '.pdf' : 'application/pdf', '.pfx' : 'application/x-pkcs12', '.pgm' : 'image/x-portable-graymap', '.pl' : 'text/plain', '.png' : 'image/png', '.pnm' : 'image/x-portable-anymap', '.pot' : 'application/vnd.ms-powerpoint', '.ppa' : 'application/vnd.ms-powerpoint', '.ppm' : 'image/x-portable-pixmap', '.pps' : 'application/vnd.ms-powerpoint', '.ppt' : 'application/vnd.ms-powerpoint', '.ps' : 'application/postscript', '.pwz' : 'application/vnd.ms-powerpoint', '.py' : 'text/x-python', '.pyc' : 'application/x-python-code', '.pyo' : 'application/x-python-code', '.qt' : 'video/quicktime', '.ra' : 'audio/x-pn-realaudio', '.ram' : 'application/x-pn-realaudio', '.ras' : 'image/x-cmu-raster', '.rdf' : 'application/xml', '.rgb' : 'image/x-rgb', '.roff' : 'application/x-troff', '.rtx' : 'text/richtext', '.sgm' : 'text/x-sgml', '.sgml' : 'text/x-sgml', '.sh' : 'application/x-sh', '.shar' : 'application/x-shar', '.snd' : 'audio/basic', '.so' : 'application/octet-stream', '.src' : 'application/x-wais-source', '.sv4cpio': 'application/x-sv4cpio', '.sv4crc' : 'application/x-sv4crc', '.svg' : 'image/svg+xml', '.swf' : 'application/x-shockwave-flash', '.t' : 'application/x-troff', '.tar' : 'application/x-tar', '.tcl' : 'application/x-tcl', '.tex' : 'application/x-tex', '.texi' : 'application/x-texinfo', '.texinfo': 'application/x-texinfo', '.tif' : 'image/tiff', '.tiff' : 'image/tiff', '.tr' : 'application/x-troff', '.tsv' : 'text/tab-separated-values', '.txt' : 'text/plain', '.ustar' : 'application/x-ustar', '.vcf' : 'text/x-vcard', '.wav' : 'audio/x-wav', '.wiz' : 'application/msword', '.wsdl' : 'application/xml', '.xbm' : 'image/x-xbitmap', '.xlb' : 'application/vnd.ms-excel', # Duplicates :( '.xls' : 'application/excel', '.xls' : 'application/vnd.ms-excel', '.xml' : 'text/xml', '.xpdl' : 
'application/xml', '.xpm' : 'image/x-xpixmap', '.xsl' : 'application/xml', '.xwd' : 'image/x-xwindowdump', '.zip' : 'application/zip', } # These are non-standard types, commonly found in the wild. They will # only match if strict=0 flag is given to the API methods. # Please sort these too common_types = { '.jpg' : 'image/jpg', '.mid' : 'audio/midi', '.midi': 'audio/midi', '.pct' : 'image/pict', '.pic' : 'image/pict', '.pict': 'image/pict', '.rtf' : 'application/rtf', '.xul' : 'text/xul' } _default_mime_types() if __name__ == '__main__': import getopt USAGE = """\ Usage: mimetypes.py [options] type Options: --help / -h -- print this message and exit --lenient / -l -- additionally search of some common, but non-standard types. --extension / -e -- guess extension instead of type More than one type argument may be given. """ def usage(code, msg=''): print(USAGE) if msg: print(msg) sys.exit(code) try: opts, args = getopt.getopt(sys.argv[1:], 'hle', ['help', 'lenient', 'extension']) except getopt.error as msg: usage(1, msg) strict = 1 extension = 0 for opt, arg in opts: if opt in ('-h', '--help'): usage(0) elif opt in ('-l', '--lenient'): strict = 0 elif opt in ('-e', '--extension'): extension = 1 for gtype in args: if extension: guess = guess_extension(gtype, strict) if not guess: print("I don't know anything about type", gtype) else: print(guess) else: guess, encoding = guess_type(gtype, strict) if not guess: print("I don't know anything about type", gtype) else: print('type:', guess, 'encoding:', encoding)
lgpl-3.0
massot/odoo
addons/web_analytics/__openerp__.py
305
1432
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Google Analytics', 'version': '1.0', 'category': 'Tools', 'complexity': "easy", 'description': """ Google Analytics. ================= Collects web application usage with Google Analytics. """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/website-builder', 'depends': ['web'], 'data': [ 'views/web_analytics.xml', ], 'installable': True, 'active': False, }
agpl-3.0
xbmc/atv2
xbmc/lib/libPython/Python/Lib/test/test_augassign.py
34
4338
# Augmented assignment test. x = 2 x += 1 x *= 2 x **= 2 x -= 8 x //= 2 x //= 1 x %= 12 x &= 2 x |= 5 x ^= 1 print x x = [2] x[0] += 1 x[0] *= 2 x[0] **= 2 x[0] -= 8 x[0] //= 2 x[0] //= 2 x[0] %= 12 x[0] &= 2 x[0] |= 5 x[0] ^= 1 print x x = {0: 2} x[0] += 1 x[0] *= 2 x[0] **= 2 x[0] -= 8 x[0] //= 2 x[0] //= 1 x[0] %= 12 x[0] &= 2 x[0] |= 5 x[0] ^= 1 print x[0] x = [1,2] x += [3,4] x *= 2 print x x = [1, 2, 3] y = x x[1:2] *= 2 y[1:2] += [1] print x print x is y class aug_test: def __init__(self, value): self.val = value def __radd__(self, val): return self.val + val def __add__(self, val): return aug_test(self.val + val) class aug_test2(aug_test): def __iadd__(self, val): self.val = self.val + val return self class aug_test3(aug_test): def __iadd__(self, val): return aug_test3(self.val + val) x = aug_test(1) y = x x += 10 print isinstance(x, aug_test) print y is not x print x.val x = aug_test2(2) y = x x += 10 print y is x print x.val x = aug_test3(3) y = x x += 10 print isinstance(x, aug_test3) print y is not x print x.val class testall: def __add__(self, val): print "__add__ called" def __radd__(self, val): print "__radd__ called" def __iadd__(self, val): print "__iadd__ called" return self def __sub__(self, val): print "__sub__ called" def __rsub__(self, val): print "__rsub__ called" def __isub__(self, val): print "__isub__ called" return self def __mul__(self, val): print "__mul__ called" def __rmul__(self, val): print "__rmul__ called" def __imul__(self, val): print "__imul__ called" return self def __div__(self, val): print "__div__ called" def __rdiv__(self, val): print "__rdiv__ called" def __idiv__(self, val): print "__idiv__ called" return self def __floordiv__(self, val): print "__floordiv__ called" return self def __ifloordiv__(self, val): print "__ifloordiv__ called" return self def __rfloordiv__(self, val): print "__rfloordiv__ called" return self def __truediv__(self, val): print "__truediv__ called" return self def __itruediv__(self, val): print 
"__itruediv__ called" return self def __mod__(self, val): print "__mod__ called" def __rmod__(self, val): print "__rmod__ called" def __imod__(self, val): print "__imod__ called" return self def __pow__(self, val): print "__pow__ called" def __rpow__(self, val): print "__rpow__ called" def __ipow__(self, val): print "__ipow__ called" return self def __or__(self, val): print "__or__ called" def __ror__(self, val): print "__ror__ called" def __ior__(self, val): print "__ior__ called" return self def __and__(self, val): print "__and__ called" def __rand__(self, val): print "__rand__ called" def __iand__(self, val): print "__iand__ called" return self def __xor__(self, val): print "__xor__ called" def __rxor__(self, val): print "__rxor__ called" def __ixor__(self, val): print "__ixor__ called" return self def __rshift__(self, val): print "__rshift__ called" def __rrshift__(self, val): print "__rrshift__ called" def __irshift__(self, val): print "__irshift__ called" return self def __lshift__(self, val): print "__lshift__ called" def __rlshift__(self, val): print "__rlshift__ called" def __ilshift__(self, val): print "__ilshift__ called" return self x = testall() x + 1 1 + x x += 1 x - 1 1 - x x -= 1 x * 1 1 * x x *= 1 if 1/2 == 0: x / 1 1 / x x /= 1 else: # True division is in effect, so "/" doesn't map to __div__ etc; # but the canned expected-output file requires that those get called. x.__div__(1) x.__rdiv__(1) x.__idiv__(1) x // 1 1 // x x //= 1 x % 1 1 % x x %= 1 x ** 1 1 ** x x **= 1 x | 1 1 | x x |= 1 x & 1 1 & x x &= 1 x ^ 1 1 ^ x x ^= 1 x >> 1 1 >> x x >>= 1 x << 1 1 << x x <<= 1
gpl-2.0
kadircet/CENG
783/HW1/cs231n/classifiers/softmax.py
1
2983
import numpy as np from random import shuffle def softmax_loss_naive(W, X, y, reg): """ Softmax loss function, naive implementation (with loops) Inputs: - W: C x D array of weights - X: D x N array of data. Data are D-dimensional columns - y: 1-dimensional array of length N with labels 0...K-1, for K classes - reg: (float) regularization strength Returns: a tuple of: - loss as single float - gradient with respect to weights W, an array of same size as W """ # Initialize the loss and gradient to zero. loss = 0.0 dW = np.zeros_like(W) ############################################################################# # TODO: Compute the softmax loss and its gradient using explicit loops. # # Store the loss in loss and the gradient in dW. If you are not careful # # here, it is easy to run into numeric instability. Don't forget the # # regularization! # ############################################################################# C = W.shape[0] D = W.shape[1] N = X.shape[1] for i in xrange(N): scores = W.dot(X[:,i]) scores -= np.max(scores) loss += -scores[y[i]] + np.log(np.sum(np.exp(scores))) scores = np.exp(scores) scores /= np.sum(scores) scores[y[i]] -= 1 for j in xrange(C): dW[j,:] += scores[j] * X[:,i] loss/=N loss+=.5*reg*np.sum(W**2) dW/=N dW+=reg*W ############################################################################# # END OF YOUR CODE # ############################################################################# return loss, dW def softmax_loss_vectorized(W, X, y, reg): """ Softmax loss function, vectorized version. Inputs and outputs are the same as softmax_loss_naive. """ # Initialize the loss and gradient to zero. loss = 0.0 dW = np.zeros_like(W) ############################################################################# # TODO: Compute the softmax loss and its gradient using no explicit loops. # # Store the loss in loss and the gradient in dW. If you are not careful # # here, it is easy to run into numeric instability. 
Don't forget the # # regularization! # ############################################################################# C = W.shape[0] D = W.shape[1] N = X.shape[1] scores = W.dot(X) scores -= np.max(scores) scores = np.exp(scores) scores /= np.sum(scores, axis=0) loss = -np.sum(np.log(scores[y, np.arange(N)]))/N + .5*reg*np.sum(W**2) scores[y, np.arange(N)] -= 1 dW = np.dot(scores,X.T)/N + reg*W ############################################################################# # END OF YOUR CODE # ############################################################################# return loss, dW
gpl-3.0
FabriceSalvaire/PyValentina
Patro/TextileTechnology/Database.py
1
2140
#################################################################################################### # # Patro - A Python library to make patterns for fashion design # Copyright (C) 2019 Fabrice Salvaire # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # #################################################################################################### __all__ = ['Database'] #################################################################################################### from pathlib import Path import yaml #################################################################################################### class Database: DEFAULT_DATA_DIRECTORY = Path(__file__).resolve().parent.joinpath('data') DEFAULT_DATA_FILENAME = None ############################################## def __init__(self): self._items = {} ############################################## def __len__(self): return len(self._items) def __iter__(self): return iter(self._items.values()) def __getitem__(self, name): return self._items[name] ############################################## def add(self, obj): if obj.name not in self._items: self._items[obj.name] = obj ############################################## def load(self, yaml_path=None): if yaml_path is None: yaml_path = self.DEFAULT_DATA_DIRECTORY.joinpath(self.DEFAULT_DATA_FILENAME) return yaml.load(open(yaml_path, 'r'), Loader=yaml.SafeLoader)
gpl-3.0
ReedWood/fdmb
tests/testEMFit.py
1
1780
import numpy as np import fdmb import itertools # Header print('fdmb test for the EM fit implementation') print('Documentation on the data can be found in ar2Data.README', end='\n\n') # Model parameters obs = np.loadtxt('ar2DataObs.dat') nData = 5000 dim = 2 order = 2 aThresh = 1e-8 pThresh = 1e-10 maxIter = np.int32(1e6) estError = False # Expected result targetA = np.array([[[1.30550248, 0.28797118], [-0.02384243, 1.71119078]], [[-8.02262977e-01, 5.75953420e-03], [-4.26190044e-04, -7.90536237e-01]]]) targetQ = np.array([[1.09429302, -0.00408784], [-0.00408784, 0.88341843]]) targetR = np.array([[10.8341601, -0.11942269], [-0.11942269, 11.0743018]]) # Fit and results print('Start fit') arCoeff = fdmb.emfit(obs, nData, dim, order, aThresh, maxIter, pThresh, estError) estA = np.asanyarray(arCoeff[0]) estQ = arCoeff[1] estR = arCoeff[2] # Compare expectation to actual result passA = np.all((np.around(targetA, 8)-np.around(estA, 8)) < np.finfo(float).eps) passQ = np.all((np.around(targetQ, 8)-np.around(estQ, 8)) < np.finfo(float).eps) passR = np.all((np.around(targetR, 8)-np.around(estR, 8)) < np.finfo(float).eps) labels = ['A: Transition matrix', 'Q: Driving noise covariance', 'R: Observational noise covariance'] failIdx = [not passA, not passQ, not passR] if not np.any(failIdx): print('EM fit installation OK') else: print('EM fit test failed for') failMsg = list(itertools.compress(data=labels, selectors=failIdx)) for msg in failMsg: print('\t%s' % msg) print('\nResult of the fit') print(labels[0]) print(estA, end='\n\n') print(labels[1]) print(estQ, end='\n\n') print(labels[2]) print(estR)
gpl-3.0
apigee/henchman
modules/curl/curl/requests/requests/packages/chardet/langgreekmodel.py
2763
12628
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: Latin7_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) win1253_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) # Model Table: # total sequences: 100% # first 512 sequences: 98.2851% # first 1024 sequences:1.7001% # rest sequences: 0.0359% # negative sequences: 0.0148% GreekLangModel = ( 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0, 2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0, 2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0, 2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0, 0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0, 3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0, 2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0, 0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0, 0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2, 0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0, 0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2, 0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0, 0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2, 0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2, 0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0, 0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0, 0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0, 0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2, 0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0, 
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2, 0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0, 0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0, 0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1, 0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0, 0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0, 
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) Latin7GreekModel = { 'charToOrderMap': Latin7_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "ISO-8859-7" } Win1253GreekModel = { 'charToOrderMap': win1253_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "windows-1253" } # flake8: noqa
bsd-2-clause
vipmike007/avocado-vt
virttest/remote_commander/remote_master.py
1
10254
#!/usr/bin/env python ''' Created on Dec 6, 2013 :author: jzupka ''' import sys import time import inspect import remote_interface import messenger def getsource(obj): return inspect.getsource(obj) def wait_timeout(timeout): if timeout is None: while 1: yield True else: end_time = time.time() + timeout while time.time() < end_time: yield True class CmdMaster(object): """ Representation of BaseCmd on master side. """ def __init__(self, commander, name, *args, **kargs): """ :params commander: Commander from which was command started. :params name: Name parsed to string representation :type name: [str, str, str] :parmas args: list to arguments to cmd. :type args: [] :params kargs: {} """ self._basecmd = remote_interface.BaseCmd(name, *args, **kargs) self.commander = commander self._stdout = "" self._stderr = "" self._results_cnt = 0 self._stdout_cnt = 0 self._stderr_cnt = 0 def getbasecmd(self): """ Property basecmd getter """ self._results_cnt = 0 return self._basecmd def setbasecmd(self, value): """ Property basecmd setter _resuls_cnt identify if value was change from last reading. """ self._basecmd = value self._results_cnt += 1 basecmd = property(getbasecmd, setbasecmd) def getstdout(self): """ Property stdout getter """ self._stdout_cnt = 0 return self._stdout def setstdout(self, value): """ Property stdout setter _stdout_cnt identify if value was change from last reading. """ self._stdout = value self._stdout_cnt += 1 stdout = property(getstdout, setstdout) def getstderr(self): """ Property stderr getter """ self._stderr_cnt = 0 return self._stderr def setstderr(self, value): """ Property stderr setter _stderr_cnt identify if value was change from last reading. """ self._stderr = value self._stderr_cnt += 1 stderr = property(getstderr, setstderr) def send_stdin(self, msg): """ Send data to stdin """ self.commander.manage.send_msg(msg, self.basecmd.cmd_id) def wait(self): """ Wait until command return results. 
""" return self.commander.wait(self) def wait_response(self, timeout=None): """ Wait until command return any cmd. """ self.commander.wait_response(self, timeout) def __getattr__(self, name): """ Shortcut to encapsulated basecmd. """ if name in ["__getstate__", "__setstate__", "__slots__"]: raise AttributeError() return getattr(self.basecmd, name) def set_commander(self, commander): """ For nohup commands it allows connect cmd to new created commander. """ self.commander = commander if self not in commander.cmds: commander.cmds[self.cmd_id] = self self.commander.manage.register_cmd(self.basecmd, remote_interface.BaseCmd.single_cmd_id) class CmdEncapsulation(object): """ Class parse command name cmd.nohup.shell -> ["nohup", "shell"] """ def __init__(self, master, obj_name, name): self.master = master if obj_name is None: self.name = [name] else: self.name = obj_name + [name] self.cmd = None def __getattr__(self, name): return CmdEncapsulation(self.master, self.name, name) def __call__(self, *args, **kargs): """ Call commander with specific command. """ self.cmd = CmdMaster(self.master, self.name, *args, **kargs) return self.master.cmd(self.cmd) class CmdTimeout(remote_interface.MessengerError): """ Raised when waiting for cmd exceeds time define by timeout. """ def __init__(self, msg): super(CmdTimeout, self).__init__(msg) def __str__(self): return "Commander Timeout %s" % (self.msg) class Commander(object): """ Commander representation for transfer over network. """ __slots__ = [] class CommanderMaster(messenger.Messenger): """ Class commander master is responsible for communication with commander slave. It invoke commands to slave part and receive messages from them. For communication is used only stdin and stdout which are streams from slave part. """ def __init__(self, stdin, stdout, debug=False): """ :type stdin: IOWrapper with implemented write function. :type stout: IOWrapper with implemented read function. 
""" super(CommanderMaster, self).__init__(stdin, stdout) self.cmds = {} self.debug = debug self.flush_stdin() self.write_msg("start") succ, msg = self.read_msg() if not succ or msg != "Started": raise remote_interface.CommanderError("Remote commander" " not started.") def close(self): try: self.manage.exit() except Exception: pass super(CommanderMaster, self).close() def __getattr__(self, name): """ Start parsing unknown attribute in cmd. """ if name in ["__getstate__", "__setstate__", "__slots__"]: raise AttributeError() return CmdEncapsulation(self, None, name) def __deepcopy__(self, memo): """ Replace deepcopy by substituting by network Commander version. """ result = Commander.__new__(Commander) memo[id(self)] = result return result def listen_streams(self, cmd): """ Listen on all streams included in Commander commands. """ if isinstance(cmd, remote_interface.StdStream): if (self.debug): print cmd.msg if cmd.isCmdMsg(): if isinstance(cmd, remote_interface.StdOut): self.cmds[cmd.cmd_id].stdout += cmd.msg elif isinstance(cmd, remote_interface.StdErr): self.cmds[cmd.cmd_id].stderr += cmd.msg else: if isinstance(cmd, remote_interface.StdOut): sys.stdout.write(cmd.msg) elif isinstance(cmd, remote_interface.StdErr): sys.stderr.write(cmd.msg) def listen_errors(self, cmd): """ Listen for errors raised from slave part of commander. """ if isinstance(cmd, (Exception, remote_interface.CommanderError, remote_interface.MessengerError)): raise cmd def listen_cmds(self, cmd): """ Manage basecmds from slave side. """ if isinstance(cmd, remote_interface.BaseCmd): if (self.debug): print cmd.func, cmd.results, cmd._finished if isinstance(cmd.results, Exception): raise cmd.results if cmd.cmd_id in self.cmds: self.cmds[cmd.cmd_id].basecmd.update(cmd) self.cmds[cmd.cmd_id].basecmd.update_cmd_hash(cmd) def listen_messenger(self, timeout=60): """ Wait for msg from slave side and take care about them. 
""" succ, r_cmd = self.read_msg(timeout) if succ is None: return r_cmd if not succ: raise remote_interface.CommanderError("Remote process died.") self.listen_errors(r_cmd) self.listen_streams(r_cmd) self.listen_cmds(r_cmd) return r_cmd def cmd(self, cmd, timeout=60): """ Invoke command on client side. """ self.cmds[cmd.basecmd.cmd_id] = cmd self.write_msg(cmd.basecmd) while (1): if cmd.basecmd.func[0] not in ["async", "nohup"]: # If not async wait for finish. self.wait(cmd, timeout) else: ancmd = self.wait_response(cmd, timeout) cmd.update_cmd_hash(ancmd) return cmd def wait(self, cmd, timeout=60): """ Wait until command return results. """ if cmd.cmd_id not in self.cmds: return cmd m_cmd = self.cmds[cmd.cmd_id] if m_cmd.is_finished(): return m_cmd r_cmd = None time_step = None if timeout is not None: time_step = timeout / 10.0 w = wait_timeout(timeout) for _ in w: r_cmd = self.listen_messenger(time_step) if isinstance(r_cmd, remote_interface.BaseCmd): if (self.debug): print m_cmd._stdout if r_cmd is not None and r_cmd == m_cmd.basecmd: # If command which we waiting for. if r_cmd.is_finished(): del self.cmds[m_cmd.basecmd.cmd_id] m_cmd.basecmd.update(r_cmd) return m_cmd m_cmd.basecmd.update(r_cmd) m_cmd.basecmd.update_cmd_hash(r_cmd) if r_cmd is None: raise CmdTimeout("%ss during %s" % (timeout, str(cmd))) def wait_response(self, cmd, timeout=60): """ Wait until command return any cmd. """ if cmd.cmd_id not in self.cmds: return cmd if cmd.is_finished() or cmd._stdout_cnt or cmd._stderr_cnt: return cmd m_cmd = self.cmds[cmd.cmd_id] r_cmd = None time_step = None if timeout is not None: time_step = timeout / 10.0 w = wait_timeout(timeout) while (w.next()): r_cmd = self.listen_messenger(time_step) if r_cmd is not None and r_cmd == m_cmd.basecmd: return m_cmd if r_cmd is None: raise CmdTimeout(timeout)
gpl-2.0
drewandersonnz/openshift-tools
ansible/roles/lib_oa_openshift/src/test/unit/test_oc_adm_router.py
15
18749
#!/usr/bin/env python ''' Unit tests for oc adm router ''' import os import six import sys import unittest import mock # Removing invalid variable names for tests so that I can # keep them brief # pylint: disable=invalid-name,no-name-in-module # Disable import-error b/c our libraries aren't loaded in jenkins # pylint: disable=import-error # place class in our python path module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501 sys.path.insert(0, module_path) from oc_adm_router import Router, locate_oc_binary # noqa: E402 # pylint: disable=too-many-public-methods class RouterTest(unittest.TestCase): ''' Test class for Router ''' dry_run = '''{ "kind": "List", "apiVersion": "v1", "metadata": {}, "items": [ { "kind": "ServiceAccount", "apiVersion": "v1", "metadata": { "name": "router", "creationTimestamp": null } }, { "kind": "ClusterRoleBinding", "apiVersion": "v1", "metadata": { "name": "router-router-role", "creationTimestamp": null }, "userNames": [ "system:serviceaccount:default:router" ], "groupNames": null, "subjects": [ { "kind": "ServiceAccount", "namespace": "default", "name": "router" } ], "roleRef": { "kind": "ClusterRole", "name": "system:router" } }, { "kind": "DeploymentConfig", "apiVersion": "v1", "metadata": { "name": "router", "creationTimestamp": null, "labels": { "router": "router" } }, "spec": { "strategy": { "type": "Rolling", "rollingParams": { "maxUnavailable": "25%", "maxSurge": 0 }, "resources": {} }, "triggers": [ { "type": "ConfigChange" } ], "replicas": 2, "test": false, "selector": { "router": "router" }, "template": { "metadata": { "creationTimestamp": null, "labels": { "router": "router" } }, "spec": { "volumes": [ { "name": "server-certificate", "secret": { "secretName": "router-certs" } } ], "containers": [ { "name": "router", "image": "registry.redhat.io/openshift3/ose-haproxy-router:v3.5.0.39", "ports": [ { "containerPort": 80 }, { "containerPort": 443 }, { "name": "stats", 
"containerPort": 1936, "protocol": "TCP" } ], "env": [ { "name": "DEFAULT_CERTIFICATE_DIR", "value": "/etc/pki/tls/private" }, { "name": "ROUTER_EXTERNAL_HOST_HOSTNAME" }, { "name": "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER" }, { "name": "ROUTER_EXTERNAL_HOST_HTTP_VSERVER" }, { "name": "ROUTER_EXTERNAL_HOST_INSECURE", "value": "false" }, { "name": "ROUTER_EXTERNAL_HOST_INTERNAL_ADDRESS" }, { "name": "ROUTER_EXTERNAL_HOST_PARTITION_PATH" }, { "name": "ROUTER_EXTERNAL_HOST_PASSWORD" }, { "name": "ROUTER_EXTERNAL_HOST_PRIVKEY", "value": "/etc/secret-volume/router.pem" }, { "name": "ROUTER_EXTERNAL_HOST_USERNAME" }, { "name": "ROUTER_EXTERNAL_HOST_VXLAN_GW_CIDR" }, { "name": "ROUTER_SERVICE_HTTPS_PORT", "value": "443" }, { "name": "ROUTER_SERVICE_HTTP_PORT", "value": "80" }, { "name": "ROUTER_SERVICE_NAME", "value": "router" }, { "name": "ROUTER_SERVICE_NAMESPACE", "value": "default" }, { "name": "ROUTER_SUBDOMAIN" }, { "name": "STATS_PASSWORD", "value": "eSfUICQyyr" }, { "name": "STATS_PORT", "value": "1936" }, { "name": "STATS_USERNAME", "value": "admin" } ], "resources": { "requests": { "cpu": "100m", "memory": "256Mi" } }, "volumeMounts": [ { "name": "server-certificate", "readOnly": true, "mountPath": "/etc/pki/tls/private" } ], "livenessProbe": { "httpGet": { "path": "/healthz", "port": 1936, "host": "localhost" }, "initialDelaySeconds": 10 }, "readinessProbe": { "httpGet": { "path": "/healthz", "port": 1936, "host": "localhost" }, "initialDelaySeconds": 10 }, "imagePullPolicy": "IfNotPresent" } ], "nodeSelector": { "type": "infra" }, "serviceAccountName": "router", "serviceAccount": "router", "hostNetwork": true, "securityContext": {} } } }, "status": { "latestVersion": 0, "observedGeneration": 0, "replicas": 0, "updatedReplicas": 0, "availableReplicas": 0, "unavailableReplicas": 0 } }, { "kind": "Service", "apiVersion": "v1", "metadata": { "name": "router", "creationTimestamp": null, "labels": { "router": "router" }, "annotations": { 
"service.alpha.openshift.io/serving-cert-secret-name": "router-certs" } }, "spec": { "ports": [ { "name": "80-tcp", "port": 80, "targetPort": 80 }, { "name": "443-tcp", "port": 443, "targetPort": 443 }, { "name": "1936-tcp", "protocol": "TCP", "port": 1936, "targetPort": 1936 } ], "selector": { "router": "router" } }, "status": { "loadBalancer": {} } } ] }''' @mock.patch('oc_adm_router.locate_oc_binary') @mock.patch('oc_adm_router.Utils._write') @mock.patch('oc_adm_router.Utils.create_tmpfile_copy') @mock.patch('oc_adm_router.Router._run') def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary): ''' Testing a create ''' params = {'state': 'present', 'debug': False, 'namespace': 'default', 'name': 'router', 'default_cert': None, 'cert_file': None, 'key_file': None, 'cacert_file': None, 'labels': {"router": "router", "another-label": "val"}, 'ports': ['80:80', '443:443'], 'images': None, 'latest_images': None, 'clusterip': None, 'portalip': None, 'session_affinity': None, 'service_type': None, 'kubeconfig': '/etc/origin/master/admin.kubeconfig', 'replicas': 2, 'selector': 'type=infra', 'service_account': 'router', 'router_type': None, 'host_network': None, 'extended_validation': True, 'external_host': None, 'external_host_vserver': None, 'external_host_insecure': False, 'external_host_partition_path': None, 'external_host_username': None, 'external_host_password': None, 'external_host_private_key': None, 'stats_user': None, 'stats_password': None, 'stats_port': 1936, 'edits': []} mock_cmd.side_effect = [ (1, '', 'Error from server (NotFound): deploymentconfigs "router" not found'), (1, '', 'Error from server (NotFound): service "router" not found'), (1, '', 'Error from server (NotFound): serviceaccount "router" not found'), (1, '', 'Error from server (NotFound): secret "router-certs" not found'), (1, '', 'Error from server (NotFound): clsuterrolebinding "router-router-role" not found'), (0, RouterTest.dry_run, ''), (0, '', ''), (0, '', 
''), (0, '', ''), (0, '', ''), (0, '', ''), ] mock_tmpfile_copy.side_effect = [ '/tmp/mocked_kubeconfig', ] mock_oc_binary.side_effect = [ 'oc', ] results = Router.run_ansible(params, False) self.assertTrue(results['changed']) for result in results['results']['results']: self.assertEqual(result['returncode'], 0) mock_cmd.assert_has_calls([ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'get', 'svc', 'router', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'get', 'sa', 'router', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'get', 'secret', 'router-certs', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'get', 'clusterrolebinding', 'router-router-role', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'adm', 'router', 'router', '--external-host-insecure=False', "--labels=another-label=val,router=router", '--ports=80:80,443:443', '--replicas=2', '--selector=type=infra', '--service-account=router', '--stats-port=1936', '--dry-run=True', '-o', 'json', '-n', 'default'], None), mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None)]) @unittest.skipIf(six.PY3, 'py2 test only') @mock.patch('os.path.exists') @mock.patch('os.environ.get') def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): ''' Testing binary lookup fallback ''' mock_env_get.side_effect = lambda _v, _d: '' mock_path_exists.side_effect = lambda _: False self.assertEqual(locate_oc_binary(), 'oc') @unittest.skipIf(six.PY3, 'py2 test only') @mock.patch('os.path.exists') @mock.patch('os.environ.get') def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): ''' Testing binary lookup in path ''' oc_bin = '/usr/bin/oc' mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' 
mock_path_exists.side_effect = lambda f: f == oc_bin self.assertEqual(locate_oc_binary(), oc_bin) @unittest.skipIf(six.PY3, 'py2 test only') @mock.patch('os.path.exists') @mock.patch('os.environ.get') def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): ''' Testing binary lookup in /usr/local/bin ''' oc_bin = '/usr/local/bin/oc' mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' mock_path_exists.side_effect = lambda f: f == oc_bin self.assertEqual(locate_oc_binary(), oc_bin) @unittest.skipIf(six.PY3, 'py2 test only') @mock.patch('os.path.exists') @mock.patch('os.environ.get') def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): ''' Testing binary lookup in ~/bin ''' oc_bin = os.path.expanduser('~/bin/oc') mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' mock_path_exists.side_effect = lambda f: f == oc_bin self.assertEqual(locate_oc_binary(), oc_bin) @unittest.skipIf(six.PY2, 'py3 test only') @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): ''' Testing binary lookup fallback ''' mock_env_get.side_effect = lambda _v, _d: '' mock_shutil_which.side_effect = lambda _f, path=None: None self.assertEqual(locate_oc_binary(), 'oc') @unittest.skipIf(six.PY2, 'py3 test only') @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): ''' Testing binary lookup in path ''' oc_bin = '/usr/bin/oc' mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' mock_shutil_which.side_effect = lambda _f, path=None: oc_bin self.assertEqual(locate_oc_binary(), oc_bin) @unittest.skipIf(six.PY2, 'py3 test only') @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): ''' Testing binary lookup in /usr/local/bin ''' oc_bin = '/usr/local/bin/oc' mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' 
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin self.assertEqual(locate_oc_binary(), oc_bin) @unittest.skipIf(six.PY2, 'py3 test only') @mock.patch('shutil.which') @mock.patch('os.environ.get') def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): ''' Testing binary lookup in ~/bin ''' oc_bin = os.path.expanduser('~/bin/oc') mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' mock_shutil_which.side_effect = lambda _f, path=None: oc_bin self.assertEqual(locate_oc_binary(), oc_bin)
apache-2.0
agent010101/agent010101.github.io
vendor/bundle/ruby/2.0.0/gems/pygments.rb-0.6.1/vendor/pygments-main/setup.py
36
2931
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Pygments ~~~~~~~~ Pygments is a syntax highlighting package written in Python. It is a generic syntax highlighter for general use in all kinds of software such as forum systems, wikis or other applications that need to prettify source code. Highlights are: * a wide range of common languages and markup formats is supported * special attention is paid to details, increasing quality by a fair amount * support for new languages and formats are added easily * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image \ formats that PIL supports and ANSI sequences * it is usable as a command-line tool and as a library * ... and it highlights even Brainfuck! The `Pygments tip`_ is installable with ``easy_install Pygments==dev``. .. _Pygments tip: http://bitbucket.org/birkenfeld/pygments-main/get/default.zip#egg=Pygments-dev :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ try: from setuptools import setup, find_packages have_setuptools = True except ImportError: try: import ez_setup ez_setup.use_setuptools() from setuptools import setup, find_packages have_setuptools = True except ImportError: from distutils.core import setup def find_packages(*args, **kwargs): return [ 'pygments', 'pygments.lexers', 'pygments.formatters', 'pygments.styles', 'pygments.filters', ] have_setuptools = False if have_setuptools: add_keywords = dict( entry_points = { 'console_scripts': ['pygmentize = pygments.cmdline:main'], }, ) else: add_keywords = dict( scripts = ['pygmentize'], ) setup( name = 'Pygments', version = '2.0pre', url = 'http://pygments.org/', license = 'BSD License', author = 'Georg Brandl', author_email = 'georg@python.org', description = 'Pygments is a syntax highlighting package written in Python.', long_description = __doc__, keywords = 'syntax highlighting', packages = find_packages(exclude=['ez_setup']), platforms = 'any', zip_safe = False, 
include_package_data = True, classifiers = [ 'License :: OSI Approved :: BSD License', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: System Administrators', 'Development Status :: 6 - Mature', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Operating System :: OS Independent', 'Topic :: Text Processing :: Filters', 'Topic :: Utilities', ], **add_keywords )
mit
rossasa/server-tools
auditlog/tests/test_autovacuum.py
14
1651
# -*- coding: utf-8 -*- # © 2016 ABF OSIELL <http://osiell.com> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). import time from openerp.tests.common import TransactionCase class TestAuditlogAutovacuum(TransactionCase): def setUp(self): super(TestAuditlogAutovacuum, self).setUp() self.groups_model_id = self.env.ref('base.model_res_groups').id self.groups_rule = self.env['auditlog.rule'].create({ 'name': 'testrule for groups', 'model_id': self.groups_model_id, 'log_read': True, 'log_create': True, 'log_write': True, 'log_unlink': True, 'state': 'subscribed', 'log_type': 'full', }) def tearDown(self): self.groups_rule.unlink() super(TestAuditlogAutovacuum, self).tearDown() def test_autovacuum(self): log_model = self.env['auditlog.log'] autovacuum_model = self.env['auditlog.autovacuum'] group = self.env['res.groups'].create({ 'name': 'testgroup1', }) nb_logs = log_model.search_count([ ('model_id', '=', self.groups_model_id), ('res_id', '=', group.id), ]) self.assertGreater(nb_logs, 0) # Milliseconds are ignored by autovacuum, waiting 1s ensure that # the logs generated will be processed by the vacuum time.sleep(1) autovacuum_model.autovacuum(days=0) nb_logs = log_model.search_count([ ('model_id', '=', self.groups_model_id), ('res_id', '=', group.id), ]) self.assertEqual(nb_logs, 0)
agpl-3.0
freeflightsim/fg-flying-club
google_appengine/google/appengine/datastore/datastore_index.py
3
15998
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Primitives for dealing with datastore indexes. Example index.yaml file: ------------------------ indexes: - kind: Cat ancestor: no properties: - name: name - name: age direction: desc - kind: Cat properties: - name: name direction: ascending - name: whiskers direction: descending - kind: Store ancestor: yes properties: - name: business direction: asc - name: owner direction: asc """ from google.appengine.api import datastore_types from google.appengine.api import validation from google.appengine.api import yaml_errors from google.appengine.api import yaml_object from google.appengine.datastore import datastore_pb from google.appengine.datastore import entity_pb class Property(validation.Validated): """Representation for an individual property of an index. Attributes: name: Name of attribute to sort by. direction: Direction of sort. """ ATTRIBUTES = { 'name': validation.TYPE_STR, 'direction': validation.Options(('asc', ('ascending',)), ('desc', ('descending',)), default='asc'), } class Index(validation.Validated): """Individual index definition. Order of the properties properties determins a given indixes sort priority. Attributes: kind: Datastore kind that index belongs to. ancestors: Include ancestors in index. properties: Properties to sort on. 
""" ATTRIBUTES = { 'kind': validation.TYPE_STR, 'ancestor': validation.Type(bool, default=False), 'properties': validation.Optional(validation.Repeated(Property)), } class IndexDefinitions(validation.Validated): """Top level for index definition file. Attributes: indexes: List of Index definitions. """ ATTRIBUTES = { 'indexes': validation.Optional(validation.Repeated(Index)), } def ParseIndexDefinitions(document): """Parse an individual index definitions document from string or stream. Args: document: Yaml document as a string or file-like stream. Raises: EmptyConfigurationFile when the configuration file is empty. MultipleConfigurationFile when the configuration file contains more than one document. Returns: Single parsed yaml file if one is defined, else None. """ try: return yaml_object.BuildSingleObject(IndexDefinitions, document) except yaml_errors.EmptyConfigurationFile: return None def ParseMultipleIndexDefinitions(document): """Parse multiple index definitions documents from a string or stream. Args: document: Yaml document as a string or file-like stream. Returns: A list of datstore_index.IndexDefinitions objects, one for each document. """ return yaml_object.BuildObjects(IndexDefinitions, document) def IndexDefinitionsToKeys(indexes): """Convert IndexDefinitions to set of keys. Args: indexes: A datastore_index.IndexDefinitions instance, or None. Returns: A set of keys constructed from the argument, each key being a tuple of the form (kind, ancestor, properties) where properties is a tuple of (name, direction) pairs, direction being ASCENDING or DESCENDING (the enums). """ keyset = set() if indexes is not None: if indexes.indexes: for index in indexes.indexes: keyset.add(IndexToKey(index)) return keyset def IndexToKey(index): """Convert Index to key. Args: index: A datastore_index.Index instance (not None!). 
Returns: A tuple of the form (kind, ancestor, properties) where properties is a tuple of (name, direction) pairs, direction being ASCENDING or DESCENDING (the enums). """ props = [] if index.properties is not None: for prop in index.properties: if prop.direction == 'asc': direction = ASCENDING else: direction = DESCENDING props.append((prop.name, direction)) return index.kind, index.ancestor, tuple(props) ASCENDING = datastore_pb.Query_Order.ASCENDING DESCENDING = datastore_pb.Query_Order.DESCENDING EQUALITY_OPERATORS = set((datastore_pb.Query_Filter.EQUAL, )) INEQUALITY_OPERATORS = set((datastore_pb.Query_Filter.LESS_THAN, datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL, datastore_pb.Query_Filter.GREATER_THAN, datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL, )) EXISTS_OPERATORS = set((datastore_pb.Query_Filter.EXISTS, )) _DIRECTION_MAP = { 'asc': entity_pb.Index_Property.ASCENDING, 'ascending': entity_pb.Index_Property.ASCENDING, 'desc': entity_pb.Index_Property.DESCENDING, 'descending': entity_pb.Index_Property.DESCENDING, } def Normalize(filters, orders): """ Normalizes filter and order query components. The resulting components have the same effect as the given components if used in a query. 
Returns: (filter, orders) the reduced set of filters and orders """ eq_properties = set() inequality_properties = set() for f in filters: if f.op() == datastore_pb.Query_Filter.IN and f.property_size() == 1: f.set_op(datastore_pb.Query_Filter.EQUAL) if f.op() in EQUALITY_OPERATORS: eq_properties.add(f.property(0).name()) elif f.op() in INEQUALITY_OPERATORS: inequality_properties.add(f.property(0).name()) eq_properties -= inequality_properties remove_set = eq_properties.copy() new_orders = [] for o in orders: if o.property() not in remove_set: remove_set.add(o.property()) new_orders.append(o) orders = new_orders if datastore_types._KEY_SPECIAL_PROPERTY in eq_properties: orders = [] new_orders = [] for o in orders: if o.property() == datastore_types._KEY_SPECIAL_PROPERTY: new_orders.append(o) break new_orders.append(o) orders = new_orders return (filters, orders) def RemoveNativelySupportedComponents(filters, orders): """ Removes query components that are natively supported by the datastore. The resulting filters and orders should not be used in an actual query. Returns (filters, orders) the reduced set of filters and orders """ (filters, orders) = Normalize(filters, orders) has_key_desc_order = False if orders and orders[-1].property() == datastore_types._KEY_SPECIAL_PROPERTY: if orders[-1].direction() == ASCENDING: orders = orders[:-1] else: has_key_desc_order = True if not has_key_desc_order: for f in filters: if (f.op() in INEQUALITY_OPERATORS and f.property(0).name() != datastore_types._KEY_SPECIAL_PROPERTY): break else: filters = [f for f in filters if f.property(0).name() != datastore_types._KEY_SPECIAL_PROPERTY] return (filters, orders) def CompositeIndexForQuery(query): """Return the composite index needed for a query. A query is translated into a tuple, as follows: - The first item is the kind string, or None if we're not filtering on kind (see below). - The second item is a bool giving whether the query specifies an ancestor. 
- After that come (property, ASCENDING) pairs for those Filter entries whose operator is EQUAL or IN. Since the order of these doesn't matter, they are sorted by property name to normalize them in order to avoid duplicates. - After that comes at most one (property, ASCENDING) pair for a Filter entry whose operator is on of the four inequalities. There can be at most one of these. - After that come all the (property, direction) pairs for the Order entries, in the order given in the query. Exceptions: (a) if there is a Filter entry with an inequality operator that matches the first Order entry, the first order pair is omitted (or, equivalently, in this case the inequality pair is omitted). (b) if an Order entry corresponds to an equality filter, it is ignored (since there will only ever be one value returned). (c) if there is an equality filter on __key__ all orders are dropped (since there will be at most one result returned). (d) if there is an order on __key__ all further orders are dropped (since keys are unique). (e) orders on __key__ ASCENDING are dropped (since this is supported natively by the datastore). - Finally, if there are Filter entries whose operator is EXISTS, and whose property names are not already listed, they are added, with the direction set to ASCENDING. This algorithm should consume all Filter and Order entries. Additional notes: - The low-level implementation allows queries that don't specify a kind; but the Python API doesn't support this yet. - If there's an inequality filter and one or more sort orders, the first sort order *must* match the inequality filter. - The following indexes are always built in and should be suppressed: - query on kind only; - query on kind and one filter *or* one order; - query on ancestor only, without kind (not exposed in Python yet); - query on kind and equality filters only, no order (with or without ancestor). - While the protocol buffer allows a Filter to contain multiple properties, we don't use this. 
It is only needed for the IN operator but this is (currently) handled on the client side, so in practice each Filter is expected to have exactly one property. Args: query: A datastore_pb.Query instance. Returns: A tuple of the form (required, kind, ancestor, (prop1, prop2, ...), neq): required: boolean, whether the index is required kind: the kind or None; ancestor: True if this is an ancestor query; prop1, prop2, ...: tuples of the form (name, direction) where: name: a property name; direction: datastore_pb.Query_Order.ASCENDING or ...DESCENDING; neq: the number of prop tuples corresponding to equality filters. """ required = True kind = query.kind() ancestor = query.has_ancestor() filters = query.filter_list() orders = query.order_list() for filter in filters: assert filter.op() != datastore_pb.Query_Filter.IN, 'Filter.op()==IN' nprops = len(filter.property_list()) assert nprops == 1, 'Filter has %s properties, expected 1' % nprops if not kind: required = False (filters, orders) = RemoveNativelySupportedComponents(filters, orders) eq_filters = [f for f in filters if f.op() in EQUALITY_OPERATORS] ineq_filters = [f for f in filters if f.op() in INEQUALITY_OPERATORS] exists_filters = [f for f in filters if f.op() in EXISTS_OPERATORS] assert (len(eq_filters) + len(ineq_filters) + len(exists_filters)) == len(filters), 'Not all filters used' if (kind and not ineq_filters and not exists_filters and not orders): names = set(f.property(0).name() for f in eq_filters) if not names.intersection(datastore_types._SPECIAL_PROPERTIES): required = False ineq_property = None if ineq_filters: ineq_property = ineq_filters[0].property(0).name() for filter in ineq_filters: assert filter.property(0).name() == ineq_property props = [] for f in eq_filters: prop = f.property(0) props.append((prop.name(), ASCENDING)) props.sort() if ineq_property: if orders: assert ineq_property == orders[0].property() else: props.append((ineq_property, ASCENDING)) for order in orders: 
props.append((order.property(), order.direction())) for filter in exists_filters: prop = filter.property(0) prop_name = prop.name() for name, direction in props: if name == prop_name: break else: props.append((prop_name, ASCENDING)) if kind and not ancestor and len(props) <= 1: required = False if props: prop, dir = props[0] if prop in datastore_types._SPECIAL_PROPERTIES and dir is DESCENDING: required = True return (required, kind, ancestor, tuple(props), len(eq_filters)) def IndexYamlForQuery(kind, ancestor, props): """Return the composite index definition YAML needed for a query. The arguments are the same as the tuples returned by CompositeIndexForQuery, without the last neq element. Args: kind: the kind or None ancestor: True if this is an ancestor query, False otherwise prop1, prop2, ...: tuples of the form (name, direction) where: name: a property name; direction: datastore_pb.Query_Order.ASCENDING or ...DESCENDING; Returns: A string with the YAML for the composite index needed by the query. """ yaml = [] yaml.append('- kind: %s' % kind) if ancestor: yaml.append(' ancestor: yes') if props: yaml.append(' properties:') for name, direction in props: yaml.append(' - name: %s' % name) if direction == DESCENDING: yaml.append(' direction: desc') return '\n'.join(yaml) def IndexDefinitionToProto(app_id, index_definition): """Transform individual Index definition to protocol buffer. Args: app_id: Application id for new protocol buffer CompositeIndex. index_definition: datastore_index.Index object to transform. Returns: New entity_pb.CompositeIndex with default values set and index information filled in. 
""" proto = entity_pb.CompositeIndex() proto.set_app_id(app_id) proto.set_id(0) proto.set_state(entity_pb.CompositeIndex.WRITE_ONLY) definition_proto = proto.mutable_definition() definition_proto.set_entity_type(index_definition.kind) definition_proto.set_ancestor(index_definition.ancestor) if index_definition.properties is not None: for prop in index_definition.properties: prop_proto = definition_proto.add_property() prop_proto.set_name(prop.name) prop_proto.set_direction(_DIRECTION_MAP[prop.direction]) return proto def IndexDefinitionsToProtos(app_id, index_definitions): """Transform multiple index definitions to composite index records Args: app_id: Application id for new protocol buffer CompositeIndex. index_definition: A list of datastore_index.Index objects to transform. Returns: A list of tranformed entity_pb.Compositeindex entities with default values set and index information filled in. """ return [IndexDefinitionToProto(app_id, index) for index in index_definitions] def ProtoToIndexDefinition(proto): """Transform individual index protocol buffer to index definition. Args: proto: An instance of entity_pb.CompositeIndex to transform. Returns: A new instance of datastore_index.Index. """ properties = [] proto_index = proto.definition() for prop_proto in proto_index.property_list(): prop_definition = Property(name=prop_proto.name()) if prop_proto.direction() == entity_pb.Index_Property.DESCENDING: prop_definition.direction = 'descending' properties.append(prop_definition) index = Index(kind=proto_index.entity_type(), properties=properties) if proto_index.ancestor(): index.ancestor = True return index def ProtosToIndexDefinitions(protos): """Transform multiple index protocol buffers to index definitions. Args: A list of entity_pb.Index records. """ return [ProtoToIndexDefinition(definition) for definition in protos]
gpl-2.0
stasm/app-validator
appvalidator/testcases/packagelayout.py
5
5740
import zlib from . import register_test # Detect blacklisted files based on their extension. blacklisted_extensions = ("dll", "exe", "dylib", "so", "sh", "class") blacklisted_magic_numbers = ( (0x4d, 0x5a), # EXE/DLL (0x5a, 0x4d), # Alternative for EXE/DLL (0x7f, 0x45, 0x4c, 0x46), # UNIX elf (0x23, 0x21), # Shebang (shell script) (0xca, 0xfe, 0xba, 0xbe), # Java + Mach-O (dylib) (0xca, 0xfe, 0xd0, 0x0d), # Java (packed) (0xfe, 0xed, 0xfa, 0xce), # Mach-O (0x46, 0x57, 0x53), # Uncompressed SWF (0x43, 0x57, 0x53), # ZLIB compressed SWF ) VC_DIRS = (".git", ".svn", ) @register_test(tier=1) def test_blacklisted_files(err, package=None): "Detects blacklisted files and extensions." if not package: return flagged_files = [] flagged_for_vc = False for name in package: file_ = package.info(name) if (file_["name_lower"].startswith(" ") or file_["name_lower"].endswith(" ")): err.error( err_id=("packagelayout", "invalid_name"), error="Filename starts with or ends with invalid character.", description=["A filename within the package was found to " "begin or end with a space. This is not " "allowed.", "Detected filename: '%s'" % name], filename=name) continue # Simple test to ensure that the extension isn't blacklisted extension = file_["extension"] if extension in blacklisted_extensions: # Note that there is a binary extension in the metadata err.metadata["contains_binary_extension"] = True flagged_files.append(name) continue if any(x in VC_DIRS for x in name.lower().split("/")): if flagged_for_vc: continue flagged_for_vc = True err.error( err_id=("packagelayout", "version_control"), error="Version control detected in package", description=["A version control directory was detected in " "your package. Version control may not be " "included as part of a packaged app due to size " "and potentially sensitive data.", "Detected file: %s" % name], filename=name) continue # Perform a deep inspection to detect magic numbers for known binary # and executable file types. 
try: z = package.zf.open(name) bytes = tuple(map(ord, z.read(4))) # Longest is 4 bytes z.close() except zlib.error: # Tell the zip that there's a broken file. package.broken_files.add(name) return err.error( err_id=("packagelayout", "blacklisted_files", "bad_zip"), error="ZIP could not be read", description="Validation failed because the ZIP package does " "not seem to be valid. One or more files could not " "be successfully unzipped.", filename=name) if any(bytes[0:len(x)] == x for x in blacklisted_magic_numbers): # Note that there is binary content in the metadata err.metadata["contains_binary_content"] = True err.warning( err_id=("testcases_packagelayout", "test_blacklisted_files", "disallowed_file_type"), warning="Flagged file type found", description=["A file was found to contain flagged content " "(i.e.: executable data, potentially " "unauthorized scripts, etc.).", u"The file \"%s\" contains flagged content" % name], filename=name) if flagged_files: err.warning( err_id=("testcases_packagelayout", "test_blacklisted_files", "disallowed_extension"), warning="Flagged file extensions found.", description=["Files whose names end with flagged extensions have " "been found in the app.", "The extension of these files are flagged because " "they usually identify binary components, which can " "contain malware.", "\n".join(flagged_files)]) @register_test(tier=1) def test_layout_all(err, package): """Tests the well-formedness of apps.""" if not package: return package_namelist = list(package.zf.namelist()) package_nameset = set(package_namelist) if len(package_namelist) != len(package_nameset): err.error( err_id=("testcases_packagelayout", "test_layout_all", "duplicate_entries"), error="Package contains duplicate entries", description="The package contains multiple entries with the same " "name. This practice has been banned. 
Try unzipping " "and re-zipping your app and try again.") if any(name.startswith('META-INF/') for name in package_nameset): err.error( err_id=("testcases_packagelayout", "test_layout_all", "META-INF"), error="Packages must not contain META-INF", description="Packages must not contain a META-INF directory. This " "directory prevents apps from being properly signed.")
bsd-3-clause
trotterdylan/grumpy
testing/generator_test.py
8
1637
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import types def gen1(): yield 1 yield 2 yield 3 g = gen1() assert isinstance(g, types.GeneratorType) assert list(g) == [1, 2, 3] assert list(g) == [] # pylint: disable=g-explicit-bool-comparison def gen2(): for c in 'foobar': yield c yield '!' g = gen2() assert list(g) == ['f', 'o', 'o', 'b', 'a', 'r', '!'] assert list(g) == [] # pylint: disable=g-explicit-bool-comparison def gen3(): raise RuntimeError yield 1 # pylint: disable=unreachable g = gen3() try: g.next() except RuntimeError: pass assert list(g) == [] # pylint: disable=g-explicit-bool-comparison def gen4(): yield g.next() g = gen4() try: g.next() except ValueError as e: assert 'generator already executing' in str(e), str(e) else: raise AssertionError def gen5(): yield g = gen5() try: g.send('foo') except TypeError as e: assert "can't send non-None value to a just-started generator" in str(e) else: raise AssertionError def gen6(): yield 1 return yield 2 g = gen6() assert list(g) == [1] assert list(g) == []
apache-2.0
ThePletch/ansible
lib/ansible/modules/identity/ipa/ipa_user.py
28
12043
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: ipa_user author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA users description: - Add, modify and delete user within IPA server options: displayname: description: Display name required: false givenname: description: First name required: false loginshell: description: Login shell required: false mail: description: - List of mail addresses assigned to the user. - If an empty list is passed all assigned email addresses will be deleted. - If None is passed email addresses will not be checked or changed. required: false password: description: - Password required: false sn: description: Surname required: false sshpubkey: description: - List of public SSH key. - If an empty list is passed all assigned public keys will be deleted. - If None is passed SSH public keys will not be checked or changed. required: false state: description: State to ensure required: false default: "present" choices: ["present", "absent", "enabled", "disabled"] telephonenumber: description: - List of telephone numbers assigned to the user. - If an empty list is passed all assigned telephone numbers will be deleted. - If None is passed telephone numbers will not be checked or changed. 
required: false title: description: Title required: false uid: description: uid of the user required: true aliases: ["name"] ipa_port: description: Port of IPA server required: false default: 443 ipa_host: description: IP or hostname of IPA server required: false default: "ipa.example.com" ipa_user: description: Administrative account used on IPA server required: false default: "admin" ipa_pass: description: Password of administrative user required: true ipa_prot: description: Protocol used by IPA server required: false default: "https" choices: ["http", "https"] validate_certs: description: - This only applies if C(ipa_prot) is I(https). - If set to C(no), the SSL certificates will not be validated. - This should only set to C(no) used on personally controlled sites using self-signed certificates. required: false default: true version_added: "2.3" requirements: - base64 - hashlib ''' EXAMPLES = ''' # Ensure pinky is present - ipa_user: name: pinky state: present givenname: Pinky sn: Acme mail: - pinky@acme.com telephonenumber: - '+555123456' sshpubkeyfp: - ssh-rsa .... - ssh-dsa .... 
ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret # Ensure brain is absent - ipa_user: name: brain state: absent ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret ''' RETURN = ''' user: description: User as returned by IPA API returned: always type: dict ''' import base64 import hashlib from ansible.module_utils.ipa import IPAClient class UserIPAClient(IPAClient): def __init__(self, module, host, port, protocol): super(UserIPAClient, self).__init__(module, host, port, protocol) def user_find(self, name): return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name}) def user_add(self, name, item): return self._post_json(method='user_add', name=name, item=item) def user_mod(self, name, item): return self._post_json(method='user_mod', name=name, item=item) def user_del(self, name): return self._post_json(method='user_del', name=name) def user_disable(self, name): return self._post_json(method='user_disable', name=name) def user_enable(self, name): return self._post_json(method='user_enable', name=name) def get_user_dict(displayname=None, givenname=None, loginshell=None, mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None, title=None, userpassword=None): user = {} if displayname is not None: user['displayname'] = displayname if givenname is not None: user['givenname'] = givenname if loginshell is not None: user['loginshell'] = loginshell if mail is not None: user['mail'] = mail user['nsaccountlock'] = nsaccountlock if sn is not None: user['sn'] = sn if sshpubkey is not None: user['ipasshpubkey'] = sshpubkey if telephonenumber is not None: user['telephonenumber'] = telephonenumber if title is not None: user['title'] = title if userpassword is not None: user['userpassword'] = userpassword return user def get_user_diff(ipa_user, module_user): """ Return the keys of each dict whereas values are different. Unfortunately the IPA API returns everything as a list even if only a single value is possible. 
Therefore some more complexity is needed. The method will check if the value type of module_user.attr is not a list and create a list with that element if the same attribute in ipa_user is list. In this way I hope that the method must not be changed if the returned API dict is changed. :param ipa_user: :param module_user: :return: """ # return [item for item in module_user.keys() if module_user.get(item, None) != ipa_user.get(item, None)] result = [] # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints. # These are used for comparison. sshpubkey = None if 'ipasshpubkey' in module_user: module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey) for pubkey in module_user['ipasshpubkey']] # Remove the ipasshpubkey element as it is not returned from IPA but save it's value to be used later on sshpubkey = module_user['ipasshpubkey'] del module_user['ipasshpubkey'] for key in module_user.keys(): mod_value = module_user.get(key, None) ipa_value = ipa_user.get(key, None) if isinstance(ipa_value, list) and not isinstance(mod_value, list): mod_value = [mod_value] if isinstance(ipa_value, list) and isinstance(mod_value, list): mod_value = sorted(mod_value) ipa_value = sorted(ipa_value) if mod_value != ipa_value: result.append(key) # If there are public keys, remove the fingerprints and add them back to the dict if sshpubkey is not None: del module_user['sshpubkeyfp'] module_user['ipasshpubkey'] = sshpubkey return result def get_ssh_key_fingerprint(ssh_key): """ Return the public key fingerprint of a given public SSH key in format "FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 [user@host] (ssh-rsa)" :param ssh_key: :return: """ parts = ssh_key.strip().split() if len(parts) == 0: return None key_type = parts[0] key = base64.b64decode(parts[1].encode('ascii')) fp_plain = hashlib.md5(key).hexdigest() key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() if len(parts) < 3: return "%s (%s)" 
% (key_fp, key_type) else: user_host = parts[2] return "%s %s (%s)" % (key_fp, user_host, key_type) def ensure(module, client): state = module.params['state'] name = module.params['name'] nsaccountlock = state == 'disabled' module_user = get_user_dict(displayname=module.params.get('displayname'), givenname=module.params.get('givenname'), loginshell=module.params['loginshell'], mail=module.params['mail'], sn=module.params['sn'], sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, telephonenumber=module.params['telephonenumber'], title=module.params['title'], userpassword=module.params['password']) ipa_user = client.user_find(name=name) changed = False if state in ['present', 'enabled', 'disabled']: if not ipa_user: changed = True if not module.check_mode: ipa_user = client.user_add(name=name, item=module_user) else: diff = get_user_diff(ipa_user, module_user) if len(diff) > 0: changed = True if not module.check_mode: ipa_user = client.user_mod(name=name, item=module_user) else: if ipa_user: changed = True if not module.check_mode: client.user_del(name) return changed, ipa_user def main(): module = AnsibleModule( argument_spec=dict( displayname=dict(type='str', required=False), givenname=dict(type='str', required=False), loginshell=dict(type='str', required=False), mail=dict(type='list', required=False), sn=dict(type='str', required=False), uid=dict(type='str', required=True, aliases=['name']), password=dict(type='str', required=False, no_log=True), sshpubkey=dict(type='list', required=False), state=dict(type='str', required=False, default='present', choices=['present', 'absent', 'enabled', 'disabled']), telephonenumber=dict(type='list', required=False), title=dict(type='str', required=False), ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), ipa_host=dict(type='str', required=False, default='ipa.example.com'), ipa_port=dict(type='int', required=False, default=443), ipa_user=dict(type='str', required=False, 
default='admin'), ipa_pass=dict(type='str', required=True, no_log=True), validate_certs=dict(type='bool', required=False, default=True), ), supports_check_mode=True, ) client = UserIPAClient(module=module, host=module.params['ipa_host'], port=module.params['ipa_port'], protocol=module.params['ipa_prot']) # If sshpubkey is defined as None than module.params['sshpubkey'] is [None]. IPA itself returns None (not a list). # Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would return sshpubkey # as different which should be avoided. if module.params['sshpubkey'] is not None: if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] is "": module.params['sshpubkey'] = None try: client.login(username=module.params['ipa_user'], password=module.params['ipa_pass']) changed, user = ensure(module, client) module.exit_json(changed=changed, user=user) except Exception: e = get_exception() module.fail_json(msg=str(e)) from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception if __name__ == '__main__': main()
gpl-3.0
errordeveloper/fe-devel
Native/Test/Python/selectors-struct.py
3
2465
# # Copyright 2010-2012 Fabric Engine Inc. All rights reserved. # import fabric FABRIC = fabric.createClient() class Vec3(): def __init__( self, x = None, y = None, z = None ): if type( x ) is int and type( y ) is int and type( z ) is int: self.x = x self.y = y self.z = z elif x is None and y is None and z is None: self.x = 0 self.y = 0 self.z = 0 else: raise Exception( 'new Vec3: invalid arguments' ) desc = { 'members': [ { 'x':'Scalar' }, { 'y':'Scalar' }, { 'z':'Scalar' } ], 'constructor': Vec3, 'kBindings': "\ function Vec3( Scalar x, Scalar y, Scalar z )\n\ {\n\ this.x = x;\n\ this.y = y;\n\ this.z = z;\n\ }\n\ " } FABRIC.RegisteredTypesManager.registerType( 'Vec3', desc ) event = FABRIC.DependencyGraph.createEvent("event") eventHandler = FABRIC.DependencyGraph.createEventHandler("eventHandler") event.appendEventHandler( eventHandler ) node = FABRIC.DependencyGraph.createNode( "node" ) eventHandler.setScope( 'self', node ) nodeOne = FABRIC.DependencyGraph.createNode( "nodeOne" ) operatorOne = FABRIC.DependencyGraph.createOperator( "operatorOne" ) operatorOne.setEntryPoint('test') operatorOne.setSourceCode("operator test( io Boolean select, io Vec3 value ) { select = true; value.x = 7; }") bindingOne = FABRIC.DependencyGraph.createBinding() bindingOne.setOperator( operatorOne ) bindingOne.setParameterLayout( [] ) eventHandlerOne = FABRIC.DependencyGraph.createEventHandler("eventHandlerOne") eventHandlerOne.setScope( 'self', nodeOne ) eventHandlerOne.setSelector( 'self', bindingOne ) eventHandler.appendChildEventHandler( eventHandlerOne ) nodeTwo = FABRIC.DependencyGraph.createNode( "nodeTwo" ) operatorTwo = FABRIC.DependencyGraph.createOperator( "operatorTwo" ) operatorTwo.setEntryPoint('test') operatorTwo.setSourceCode("operator test( io Boolean select, io Vec3 value ) { value.x = 4; }") bindingTwo = FABRIC.DependencyGraph.createBinding() bindingTwo.setOperator( operatorTwo ) bindingTwo.setParameterLayout( [] ) eventHandlerTwo = 
FABRIC.DependencyGraph.createEventHandler("eventHandlerTwo") eventHandlerTwo.setScope( 'self', nodeTwo ) eventHandlerTwo.setSelector( 'self', bindingTwo ) eventHandler.appendChildEventHandler( eventHandlerTwo ) event.setSelectType( 'Vec3' ) result = event.select() if len( result ) != 1: print( "incorrect number of results" ) if result[0]['value'].x != 7: print( "incorrect value of results" ) FABRIC.close()
agpl-3.0
kaedroho/django
tests/template_tests/filter_tests/test_phone2numeric.py
176
1500
from django.template.defaultfilters import phone2numeric_filter from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class Phone2numericTests(SimpleTestCase): @setup({'phone2numeric01': '{{ a|phone2numeric }} {{ b|phone2numeric }}'}) def test_phone2numeric01(self): output = self.engine.render_to_string( 'phone2numeric01', {'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')}, ) self.assertEqual(output, '&lt;1-800-2255-63&gt; <1-800-2255-63>') @setup({'phone2numeric02': '{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}'}) def test_phone2numeric02(self): output = self.engine.render_to_string( 'phone2numeric02', {'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')}, ) self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>') @setup({'phone2numeric03': '{{ a|phone2numeric }}'}) def test_phone2numeric03(self): output = self.engine.render_to_string( 'phone2numeric03', {'a': 'How razorback-jumping frogs can level six piqued gymnasts!'}, ) self.assertEqual( output, '469 729672225-5867464 37647 226 53835 749 747833 49662787!' ) class FunctionTests(SimpleTestCase): def test_phone2numeric(self): self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
bsd-3-clause
kingcons/pybrightcove
pybrightcove/enums.py
3
4353
# Copyright (c) 2009 StudioNow, Inc <patrick@studionow.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # pylint: disable=C0111,R0903 class SortByType(object): """ PUBLISH_DATE: Date title was published CREATION_DATE: Date title was created. MODIFIED_DATE Date title was last modified. PLAYS_TOTAL Number of times this title has been viewed. PLAYS_TRAILING_WEEK Number of times this title has been viewed in the past 7 days (excluding today). """ PUBLISH_DATE = "PUBLISH_DATE" CREATION_DATE = "CREATION_DATE" MODIFIED_DATE = "MODIFIED_DATE" PLAYS_TOTAL = "PLAYS_TOTAL" PLAYS_TRAILING_WEEK = "PLAYS_TRAILING_WEEK" class SortByOrderType(object): """ ASC: Ascending DESC: Descending """ ASC = "ASC" DESC = "DESC" class UploadStatusEnum(object): """ UPLOADING: File is still uploading PROCESSING: Upload complete; being processed. COMPLETE: Upload and processing complete. ERROR: Error in upload or processing. 
""" UPLOADING = "UPLOADING" PROCESSING = "PROCESSING" COMPLETE = "COMPLETE" ERROR = "ERROR" class EconomicsEnum(object): FREE = "FREE" AD_SUPPORTED = "AD_SUPPORTED" class EncodeToEnum(object): MP4 = 'MP4' FLV = 'FLV' class ItemStateEnum(object): ACTIVE = "ACTIVE" INACTIVE = "INACTIVE" DELETED = "DELETED" class PlaylistTypeEnum(object): """ EXPLICIT: A manual playlist, the videos of which were added individually. OLDEST_TO_NEWEST: A smart playlist, ordered from oldest to newest by last-modified date. NEWEST_TO_OLDEST: A smart playlist, ordered from newest to oldest by last-modified date. ALPHABETICAL: A smart playlist, ordered alphabetically. PLAYS_TOTAL: A smart playlist, ordered by total plays. PLAYS_TRAILING_WEEK: A smart playlist, ordered by most plays in the past week. """ EXPLICIT = "EXPLICIT" OLDEST_TO_NEWEST = "OLDEST_TO_NEWEST" NEWEST_TO_OLDEST = "NEWEST_TO_OLDEST" ALPHABETICAL = "ALPHABETICAL" PLAYS_TOTAL = "PLAYS_TOTAL" PLAYS_TRAILING_WEEK = "PLAYS_TRAILING_WEEK" class FilterChoicesEnum(object): PLAYABLE = "PLAYABLE" UNSCHEDULED = 'UNSCHEDULED' INACTIVE = 'INACTIVE' DELETED = 'DELETED' class VideoCodecEnum(object): UNDEFINED = "UNDEFINED" NONE = "NONE" SORENSON = "SORENSON" ON2 = "ON2" H264 = "H264" class ImageTypeEnum(object): VIDEO_STILL = "VIDEO_STILL" SYNDICATION_STILL = "SYNDICATION_STILL" THUMBNAIL = "THUMBNAIL" BACKGROUND = "BACKGROUND" LOGO = "LOGO" LOGO_OVERLAY = "LOGO_OVERLAY" class VideoTypeEnum(object): FLV_PREVIEW = "FLV_PREVIEW" FLV_FULL = "FLV_FULL" FLV_BUMPER = "FLV_BUMPER" DIGITAL_MASTER = "DIGITAL_MASTER" class AssetTypeEnum(object): VIDEO_FULL = "VIDEO_FULL" FLV_BUMPER = "FLV_BUMPER" THUMBNAIL = "THUMBNAIL" VIDEO_STILL = "VIDEO_STILL" BACKGROUND = "BACKGROUND" LOGO = "LOGO" LOGO_OVERLAY = "LOGO_OVERLAY" OTHER_IMAGE = "OTHER_IMAGE" class CustomMetaType(object): ENUM = 'enum' STRING = 'string' DEFAULT_SORT_BY = SortByType.CREATION_DATE DEFAULT_SORT_ORDER = SortByOrderType.ASC
mit
Cashiuus/metagoofil
hachoir_parser/archive/rar.py
84
13364
""" RAR parser Status: can only read higher-level attructures Author: Christophe Gisquet """ from hachoir_parser import Parser from hachoir_core.field import (StaticFieldSet, FieldSet, Bit, Bits, Enum, UInt8, UInt16, UInt32, UInt64, String, TimeDateMSDOS32, NullBytes, NullBits, RawBytes) from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal from hachoir_core.endian import LITTLE_ENDIAN from hachoir_parser.common.msdos import MSDOSFileAttr32 MAX_FILESIZE = 1000 * 1024 * 1024 BLOCK_NAME = { 0x72: "Marker", 0x73: "Archive", 0x74: "File", 0x75: "Comment", 0x76: "Extra info", 0x77: "Subblock", 0x78: "Recovery record", 0x79: "Archive authenticity", 0x7A: "New-format subblock", 0x7B: "Archive end", } COMPRESSION_NAME = { 0x30: "Storing", 0x31: "Fastest compression", 0x32: "Fast compression", 0x33: "Normal compression", 0x34: "Good compression", 0x35: "Best compression" } OS_MSDOS = 0 OS_WIN32 = 2 OS_NAME = { 0: "MS DOS", 1: "OS/2", 2: "Win32", 3: "Unix", } DICTIONARY_SIZE = { 0: "Dictionary size 64 Kb", 1: "Dictionary size 128 Kb", 2: "Dictionary size 256 Kb", 3: "Dictionary size 512 Kb", 4: "Dictionary size 1024 Kb", 7: "File is a directory", } def formatRARVersion(field): """ Decodes the RAR version stored on 1 byte """ return "%u.%u" % divmod(field.value, 10) def commonFlags(s): yield Bit(s, "has_added_size", "Additional field indicating additional size") yield Bit(s, "is_ignorable", "Old versions of RAR should ignore this block when copying data") class ArchiveFlags(StaticFieldSet): format = ( (Bit, "vol", "Archive volume"), (Bit, "has_comment", "Whether there is a comment"), (Bit, "is_locked", "Archive volume"), (Bit, "is_solid", "Whether files can be extracted separately"), (Bit, "new_numbering", "New numbering, or compressed comment"), # From unrar (Bit, "has_authenticity_information", "The integrity/authenticity of the archive can be checked"), (Bit, "is_protected", "The integrity/authenticity of the archive can be checked"), (Bit, 
"is_passworded", "Needs a password to be decrypted"), (Bit, "is_first_vol", "Whether it is the first volume"), (Bit, "is_encrypted", "Whether the encryption version is present"), (NullBits, "internal", 6, "Reserved for 'internal use'") ) def archiveFlags(s): yield ArchiveFlags(s, "flags", "Archiver block flags") def archiveHeader(s): yield NullBytes(s, "reserved[]", 2, "Reserved word") yield NullBytes(s, "reserved[]", 4, "Reserved dword") def commentHeader(s): yield filesizeHandler(UInt16(s, "total_size", "Comment header size + comment size")) yield filesizeHandler(UInt16(s, "uncompressed_size", "Uncompressed comment size")) yield UInt8(s, "required_version", "RAR version needed to extract comment") yield UInt8(s, "packing_method", "Comment packing method") yield UInt16(s, "comment_crc16", "Comment CRC") def commentBody(s): size = s["total_size"].value - s.current_size if size > 0: yield RawBytes(s, "comment_data", size, "Compressed comment data") def signatureHeader(s): yield TimeDateMSDOS32(s, "creation_time") yield filesizeHandler(UInt16(s, "arc_name_size")) yield filesizeHandler(UInt16(s, "user_name_size")) def recoveryHeader(s): yield filesizeHandler(UInt32(s, "total_size")) yield textHandler(UInt8(s, "version"), hexadecimal) yield UInt16(s, "rec_sectors") yield UInt32(s, "total_blocks") yield RawBytes(s, "mark", 8) def avInfoHeader(s): yield filesizeHandler(UInt16(s, "total_size", "Total block size")) yield UInt8(s, "version", "Version needed to decompress", handler=hexadecimal) yield UInt8(s, "method", "Compression method", handler=hexadecimal) yield UInt8(s, "av_version", "Version for AV", handler=hexadecimal) yield UInt32(s, "av_crc", "AV info CRC32", handler=hexadecimal) def avInfoBody(s): size = s["total_size"].value - s.current_size if size > 0: yield RawBytes(s, "av_info_data", size, "AV info") class FileFlags(FieldSet): static_size = 16 def createFields(self): yield Bit(self, "continued_from", "File continued from previous volume") yield Bit(self, 
"continued_in", "File continued in next volume") yield Bit(self, "is_encrypted", "File encrypted with password") yield Bit(self, "has_comment", "File comment present") yield Bit(self, "is_solid", "Information from previous files is used (solid flag)") # The 3 following lines are what blocks more staticity yield Enum(Bits(self, "dictionary_size", 3, "Dictionary size"), DICTIONARY_SIZE) for bit in commonFlags(self): yield bit yield Bit(self, "is_large", "file64 operations needed") yield Bit(self, "is_unicode", "Filename also encoded using Unicode") yield Bit(self, "has_salt", "Has salt for encryption") yield Bit(self, "uses_file_version", "File versioning is used") yield Bit(self, "has_ext_time", "Extra time ??") yield Bit(self, "has_ext_flags", "Extra flag ??") def fileFlags(s): yield FileFlags(s, "flags", "File block flags") class ExtTime(FieldSet): def createFields(self): yield textHandler(UInt16(self, "time_flags", "Flags for extended time"), hexadecimal) flags = self["time_flags"].value for index in xrange(4): rmode = flags >> ((3-index)*4) if rmode & 8: if index: yield TimeDateMSDOS32(self, "dos_time[]", "DOS Time") if rmode & 3: yield RawBytes(self, "remainder[]", rmode & 3, "Time remainder") def specialHeader(s, is_file): yield filesizeHandler(UInt32(s, "compressed_size", "Compressed size (bytes)")) yield filesizeHandler(UInt32(s, "uncompressed_size", "Uncompressed size (bytes)")) yield Enum(UInt8(s, "host_os", "Operating system used for archiving"), OS_NAME) yield textHandler(UInt32(s, "crc32", "File CRC32"), hexadecimal) yield TimeDateMSDOS32(s, "ftime", "Date and time (MS DOS format)") yield textHandler(UInt8(s, "version", "RAR version needed to extract file"), formatRARVersion) yield Enum(UInt8(s, "method", "Packing method"), COMPRESSION_NAME) yield filesizeHandler(UInt16(s, "filename_length", "File name size")) if s["host_os"].value in (OS_MSDOS, OS_WIN32): yield MSDOSFileAttr32(s, "file_attr", "File attributes") else: yield textHandler(UInt32(s, 
"file_attr", "File attributes"), hexadecimal) # Start additional field from unrar if s["flags/is_large"].value: yield filesizeHandler(UInt64(s, "large_size", "Extended 64bits filesize")) # End additional field size = s["filename_length"].value if size > 0: if s["flags/is_unicode"].value: charset = "UTF-8" else: charset = "ISO-8859-15" yield String(s, "filename", size, "Filename", charset=charset) # Start additional fields from unrar - file only if is_file: if s["flags/has_salt"].value: yield textHandler(UInt8(s, "salt", "Salt"), hexadecimal) if s["flags/has_ext_time"].value: yield ExtTime(s, "extra_time", "Extra time info") def fileHeader(s): return specialHeader(s, True) def fileBody(s): # File compressed data size = s["compressed_size"].value if s["flags/is_large"].value: size += s["large_size"].value if size > 0: yield RawBytes(s, "compressed_data", size, "File compressed data") def fileDescription(s): return "File entry: %s (%s)" % \ (s["filename"].display, s["compressed_size"].display) def newSubHeader(s): return specialHeader(s, False) class EndFlags(StaticFieldSet): format = ( (Bit, "has_next_vol", "Whether there is another next volume"), (Bit, "has_data_crc", "Whether a CRC value is present"), (Bit, "rev_space"), (Bit, "has_vol_number", "Whether the volume number is present"), (Bits, "unused[]", 4), (Bit, "has_added_size", "Additional field indicating additional size"), (Bit, "is_ignorable", "Old versions of RAR should ignore this block when copying data"), (Bits, "unused[]", 6), ) def endFlags(s): yield EndFlags(s, "flags", "End block flags") class BlockFlags(FieldSet): static_size = 16 def createFields(self): yield textHandler(Bits(self, "unused[]", 8, "Unused flag bits"), hexadecimal) yield Bit(self, "has_added_size", "Additional field indicating additional size") yield Bit(self, "is_ignorable", "Old versions of RAR should ignore this block when copying data") yield Bits(self, "unused[]", 6) class Block(FieldSet): BLOCK_INFO = { # None means 'use default 
function' 0x72: ("marker", "Archive header", None, None, None), 0x73: ("archive_start", "Archive info", archiveFlags, archiveHeader, None), 0x74: ("file[]", fileDescription, fileFlags, fileHeader, fileBody), 0x75: ("comment[]", "Stray comment", None, commentHeader, commentBody), 0x76: ("av_info[]", "Extra information", None, avInfoHeader, avInfoBody), 0x77: ("sub_block[]", "Stray subblock", None, newSubHeader, fileBody), 0x78: ("recovery[]", "Recovery block", None, recoveryHeader, None), 0x79: ("signature", "Signature block", None, signatureHeader, None), 0x7A: ("new_sub_block[]", "Stray new-format subblock", fileFlags, newSubHeader, fileBody), 0x7B: ("archive_end", "Archive end block", endFlags, None, None), } def __init__(self, parent, name): FieldSet.__init__(self, parent, name) t = self["block_type"].value if t in self.BLOCK_INFO: self._name, desc, parseFlags, parseHeader, parseBody = self.BLOCK_INFO[t] if callable(desc): self.createDescription = lambda: desc(self) elif desc: self._description = desc if parseFlags : self.parseFlags = lambda: parseFlags(self) if parseHeader : self.parseHeader = lambda: parseHeader(self) if parseBody : self.parseBody = lambda: parseBody(self) else: self.info("Processing as unknown block block of type %u" % type) self._size = 8*self["block_size"].value if t == 0x74 or t == 0x7A: self._size += 8*self["compressed_size"].value if "is_large" in self["flags"] and self["flags/is_large"].value: self._size += 8*self["large_size"].value elif "has_added_size" in self: self._size += 8*self["added_size"].value # TODO: check if any other member is needed here def createFields(self): yield textHandler(UInt16(self, "crc16", "Block CRC16"), hexadecimal) yield textHandler(UInt8(self, "block_type", "Block type"), hexadecimal) # Parse flags for field in self.parseFlags(): yield field # Get block size yield filesizeHandler(UInt16(self, "block_size", "Block size")) # Parse remaining header for field in self.parseHeader(): yield field # Finish header 
with stuff of unknow size size = self["block_size"].value - (self.current_size//8) if size > 0: yield RawBytes(self, "unknown", size, "Unknow data (UInt32 probably)") # Parse body for field in self.parseBody(): yield field def createDescription(self): return "Block entry: %s" % self["type"].display def parseFlags(self): yield BlockFlags(self, "flags", "Block header flags") def parseHeader(self): if "has_added_size" in self["flags"] and \ self["flags/has_added_size"].value: yield filesizeHandler(UInt32(self, "added_size", "Supplementary block size")) def parseBody(self): """ Parse what is left of the block """ size = self["block_size"].value - (self.current_size//8) if "has_added_size" in self["flags"] and self["flags/has_added_size"].value: size += self["added_size"].value if size > 0: yield RawBytes(self, "body", size, "Body data") class RarFile(Parser): MAGIC = "Rar!\x1A\x07\x00" PARSER_TAGS = { "id": "rar", "category": "archive", "file_ext": ("rar",), "mime": (u"application/x-rar-compressed", ), "min_size": 7*8, "magic": ((MAGIC, 0),), "description": "Roshal archive (RAR)", } endian = LITTLE_ENDIAN def validate(self): magic = self.MAGIC if self.stream.readBytes(0, len(magic)) != magic: return "Invalid magic" return True def createFields(self): while not self.eof: yield Block(self, "block[]") def createContentSize(self): start = 0 end = MAX_FILESIZE * 8 pos = self.stream.searchBytes("\xC4\x3D\x7B\x00\x40\x07\x00", start, end) if pos is not None: return pos + 7*8 return None
gpl-2.0
iHamsterball/stellaris_tech_tree
stellaris_tech_tree/deep_parsers/weight_modifiers.py
1
25996
# -*- coding: utf-8 -*- from django.utils.translation import ugettext import re import yaml import sys localization_map = {} def parse(modifier, loc_data): global localization_map localization_map = loc_data if len(modifier) == 1: modifier.append({'always': 'yes'}) try: factor = next(iter(key for key in modifier if list(key)[0] == 'factor'))['factor'] adjustment = _localize_factor(factor) except StopIteration: add = next(iter(line for line in modifier if list(line)[0] == 'add'))['add'] adjustment = _localize_add(add) unparsed_conditions = [line for line in modifier if list(line)[0] not in ['factor', 'add']] if len(unparsed_conditions) > 1: unparsed_conditions = [{'AND': unparsed_conditions}] conditions = [_parse_condition(condition) for condition in unparsed_conditions] yaml_output = yaml.dump({adjustment: conditions}, indent=4, default_flow_style=False, allow_unicode=True) pseudo_yaml = re.sub(r'(\xd7[\d.]+):\n\s*- ', r'(\1)', yaml_output).replace('- ', u'• ') return pseudo_yaml def _alt_key(key): yield key yield 'bypass_{}'.format(key).upper() yield '{}_base'.format(key).lower() yield '{}_name'.format(key).lower() def _localize(arg): localized = None key = arg if isinstance(arg, str) else arg.group(1) for alt_key in _alt_key(key): try: localized = localization_map[alt_key] break except KeyError: continue except Exception as e: print(e.with_traceback) if not localized: print('Unprocessed weight modifier {}: {}'.format(key, localized)) else: while '$' in localized: localized = re.sub(r'\$([\w|+=]+)\$', _localize, localized) if '@' in localized: localized = _localize(localized.replace('@', '')) print(key, localized) if '_' in localized: print('Unprocessed weight modifier {}: {}'.format(key, localized)) return localized def _parse_condition(condition): key = next(iter(condition)) value = condition[key] try: return globals()['_localize_' + key.lower()](value) except KeyError: print('Error missing localization function {}, value: {}'.format( '_localize_' + key.lower(), 
value)) return '' def _expert(expert): return { 'Physics': ugettext('Physics'), 'Society': ugettext('Society'), 'Engineering': ugettext('Engineering'), }.get(expert, 'Shouldn\'t be here') def _localize_factor(factor): # TODO: Temporary workaround, highly probably malfunctioned on next update. # Version 2.3 factor = factor if not isinstance(factor, str) else 1.5 return u'\xD7{}'.format(factor) def _localize_add(add): sign = '' if add == 0 else '+' if add > 0 else '-' return '{}{}'.format(sign, add) def _localize_has_ethic(value): ethic = _localize(value) return ugettext('Has {} Ethic').format(ethic) def _localize_has_not_ethic(value): ethic = _localize(value) return ugettext('Does NOT have {} Ethic').format(ethic) def _localize_has_civic(value): civic = _localize(value) return ugettext('Has {} Government Civic').format(civic) def _localize_has_valid_civic(value): civic = _localize(value) return ugettext('Has {} Government Civic').format(civic) def _localize_has_not_valid_civic(value): civic = _localize(value) return ugettext('Does NOT have {} Government Civic').format(civic) def _localize_has_ascension_perk(value): perk = _localize(value) return ugettext('Has {} Ascension Perk').format(perk) def _localize_has_megastructure(value): megastructure = _localize(value) return ugettext('Has Megatructure {}').format(megastructure) def _localize_has_policy_flag(value): try: policy_flag = _localize(value) except (KeyError): policy_flag = _localize(value+'_name') return ugettext('Has {} Policy').format(policy_flag) def _localize_has_trait(value): trait = _localize(value) return ugettext('Has {} Trait').format(trait) def _localize_pop_has_trait(value): trait = _localize(value) return ugettext('Pop in empire has {} trait').format(trait) def _localize_has_authority(value): authority = _localize(value) return ugettext('Has {} Authority').format(authority) def _localize_host_has_dlc(dlc): # dlc = _localize(value) return ugettext('Host does has the {} DLC').format(dlc) def 
_localize_host_has_not_dlc(dlc): # dlc = _localize(value) return ugettext('Host does NOT have the {} DLC').format(dlc) def _localize_has_technology(value): try: technology = _localize(value) except KeyError: technology = value return ugettext('Has {} Technology').format(technology) def _localize_has_not_technology(value): try: technology = _localize(value) except KeyError: technology = value return ugettext('Does NOT have {} Technology').format(technology) def _localize_has_modifier(value): modifier = _localize(value) return ugettext('Has the {} modifier').format(modifier) def _localize_has_not_modifier(value): modifier = _localize(value) return ugettext('Does NOT have the {} modifier').format(modifier) def _localize_is_country_type(value): return ugettext('Is of the {} country type').format(ugettext(value)) def _localize_ideal_planet_class(value): return ugettext('Is ideal class').format(value) def _localize_is_planet_class(value): planet_class = _localize(value) return ugettext('Is {}').format(planet_class) def _localize_has_government(value): government = _localize(value) return ugettext('Has {}').format(government) def _localize_is_colony(value): return ugettext('Is a Colony') if value == 'yes' \ else ugettext('Is NOT a Colony') def _localize_allows_slavery(value): return ugettext('Allows Slavery') if value == 'yes' \ else ugettext('Does NOT allow Slavery') def _localize_has_federation(value): return ugettext('Is in a Federation') if value == 'yes' \ else ugettext('Is NOT in a Federation') def _localize_num_owned_planets(value): operator, value = _operator_and_value(value) return ugettext('Number of owned planets is {} {}').format(operator, value) def _localize_count_owned_pops(value): operator, value = _operator_and_value(value[1]['count']) return ugettext('Number of enslaved planets {} {}').format(operator, value) def _localize_num_communications(value): operator, value = _operator_and_value(value) return ugettext('Number of owned planets is {} 
{}').format(operator, value) def _localize_has_communications(value): return ugettext('Has communications with your Empire') def _localize_is_ai(value): return ugettext('Is AI controlled') if value == 'yes' else ugettext('Is NOT AI controlled') def _localize_is_same_species(value): localized_value = 'Dominant' \ if value.lower() == 'root' \ else _localize(value) return ugettext('Is of the {} Species').format(localized_value) def _localize_is_species(value): localized_value = 'Dominant' \ if value.lower() == 'root' \ else _localize(value) article = 'an' if localized_value[0].lower() in 'aeiou' else 'a' return ugettext('Is {} {}').format(article, localized_value) def _localize_is_species_class(value): localized_value = _localize(value) article = 'an' if localized_value[0].lower() in 'aeiou' else 'a' return ugettext('Is {} {}').format(article, localized_value) def _localize_is_enslaved(value): return ugettext('Pop is enslaved') if value == 'yes' else ugettext('Pop is NOT enslaved') def _localize_years_passed(value): operator, value = _operator_and_value(value) return ugettext('Number of years since game start is {} {}').format(operator, value) def _localize_not_years_passed(value): operator, value = _operator_and_value(value) return ugettext('Number of years since game start is NOT {} {}').format(operator, value) def _localize_has_country_flag(value): return ugettext('Has {} country flag').format(value) def _localize_has_not_country_flag(value): return ugettext('Does NOT have {} country flag').format(value) def _localize_research_leader(values, negated=False): leader = ugettext('Research Leader ({})').format( _expert(values[0]['area'].title())) if negated: leader = ugettext('NOT ') + leader localized_conditions = [] for condition in values[1:]: key = list(condition)[0] value = condition[key] localized_condition = { 'has_trait': lambda: _localize_has_expertise(value), 'has_level': lambda: _localize_has_level(value), 'OR': lambda: _localize_or(values) }[key]() 
localized_conditions.append(localized_condition) return {leader: localized_conditions} def _localize_not_research_leader(values): return _localize_research_leader(values, negated=True) def _localize_has_level(value): operator, level = _operator_and_value(value) return ugettext('Skill level is {} {}').format(operator, level) def _localize_has_expertise(value): expertise = _localize(value) if expertise.find(':') != -1 or expertise.find(':') != -1: colon_loc = 1 + expertise.find(':') + expertise.find(':') truncated = expertise.replace(expertise[0:colon_loc+1], '') condition = ugettext('Is {} Expert').format(truncated) else: condition = ugettext('Is {}').format(expertise) return condition def _localize_area(value): # WTF is this? # Version 1.3 and earlier return ugettext('physics') if value == 'physics' else '' def _localize_any_system_within_border(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any System within Borders'): parsed_values} def _localize_not_any_system_within_border(values): # Version 1.3 and earlier parsed_values = [_parse_condition(value) for value in values] return {ugettext('NOT Any System within Borders'): parsed_values} def _localize_is_in_cluster(value): return ugettext('Is in a {} Cluster').format(value) def _localize_any_country(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any Country'): parsed_values} def _localize_any_relation(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any Relation'): parsed_values} def _localize_any_owned_pop(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any empire Pop'): parsed_values} def _localize_not_any_owned_pop(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('NOT any owned Pop'): parsed_values} def _localize_any_pop(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any 
Pop'): parsed_values} def _localize_any_owned_planet(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any owned Planet'): parsed_values} def _localize_any_planet(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any Planet'): parsed_values} def _localize_not_any_owned_planet(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('NOT any owned Planet'): parsed_values} def _localize_any_planet_within_border(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any Planet within Borders'): parsed_values} def _localize_any_system_planet(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any System Planet'): parsed_values} def _localize_any_tile(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any Tile'): parsed_values} def _localize_has_blocker(value): blocker = _localize(value) return ugettext('Has {} Tile Blocker').format(blocker) def _localize_any_neighbor_country(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any Neighbor Country'): parsed_values} def _localize_has_resource(value): resource, amount = value[0]['type'], value[1]['amount'] operator, amount = _operator_and_value(amount) localized_resource = _localize(resource) return ugettext('Has {} {} {}').format(operator, amount, localized_resource) def _localize_has_not_resource(value): resource, amount = value[0]['type'], value[1]['amount'] operator, amount = _operator_and_value(amount) localized_resource = _localize(resource) return ugettext('Does NOT have {} {} {}').format(operator, amount, localized_resource) def _localize_has_any_megastructure_in_empire(value): return ugettext('Has any megastructure in empire') if value == 'yes' else ugettext('Does NOT have any megastructure in empire') def _localize_is_ftl_restricted(value): return ugettext('Is 
FTL restricted') if value == 'yes' else ugettext('Is NOT FTL restricted') def _localize_has_not_authority(value): localized_machine_intelligence = _localize(value) return ugettext('{} has NOT authority').format(localized_machine_intelligence) def _localize_has_not_policy_flag(value): # Version 1.3 and earlier return ugettext('{} Slavery for all species').format(_localize(value)) def _localize_not_is_same_species(value): # ROOT # I don't know what's this, I didn't play launch version before # Version 1.0 only return ugettext('Is NOT same species with ROOT species') def _localize_has_building(value): # Alpha Mod return ugettext('Has building {}').format(_localize(value)) def _localize_empire_has_not_sr_dark_matter(value): # Alpha Mod return ugettext('Does NOT have any dark matter in empire') if value == 'yes' else ugettext('Has any dark matter in empire') def _localize_has_not_tradition(value): # Alpha Mod return ugettext('Does NOT have {} tradition').format(_localize(value)) def _localize_is_playable_country(value): # New Ship Classes Mod return ugettext('Is playable country') if value == 'yes' else ugettext('Is NOT playable country') def _localize_mid_game_years_passed(value): # New Ship Classes 2 Mod operator, value = _operator_and_value(value) return ugettext('Number of years since mid game is {} {}').format(operator, value) def _localize_is_nomadic_empire(value): # Star Trek: New Horizons Mod return ugettext('Is Nomadic Empire') if value == 'yes' else ugettext('Is NOT Nomadic Empire') def _localize_tos_era_or_higher(value): # Star Trek: New Horizons Mod return ugettext('TOS era or higher') if value == 'yes' else ugettext('NOT TOS era or higher') def _localize_tmp_era_or_higher(value): # Star Trek: New Horizons Mod return ugettext('TMP era or higher') if value == 'yes' else ugettext('NOT TMP era or higher') def _localize_has_non_standard_ships(value): # Star Trek: New Horizons Mod return ugettext('Has non-standard ships') if value == 'yes' else ugettext('Does NOT 
have standard ships') def _localize_is_borg_empire(value): # Star Trek: New Horizons Mod return ugettext('Is borg empire') if value == 'yes' else ugettext('Is NOT borg empire') def _localize_has_unique_fighter_variant(value): # Star Trek: New Horizons Mod return ugettext('Has unique fighter variant') if value == 'yes' else ugettext('Does NOT have fighter variant') def _localize_is_temporal_masters(value): # Star Trek: New Horizons Mod return ugettext('Is temporal masters') if value == 'yes' else ugettext('Is NOT teporal masters') def _localize_uses_polaron_torp(value): # Star Trek: New Horizons Mod return ugettext('Uses polaron torp') if value == 'yes' else ugettext('Does NOT use polaron torp') def _localize_uses_quantum_torp(value): # Star Trek: New Horizons Mod return ugettext('Uses quantum torp') if value == 'yes' else ugettext('Does NOT use quantum torp') def _localize_uses_photonic_torpedo(value): # Star Trek: New Horizons Mod return ugettext('Uses photonic torpedo') if value == 'yes' else ugettext('Does NOT use photonic torpedo') def _localize_uses_plasma_torpedo(value): # Star Trek: New Horizons Mod return ugettext('Uses plasma torpedo') if value == 'yes' else ugettext('Does NOT use plasma torpedo') def _localize_uses_warp_cores(value): # Star Trek: New Horizons Mod return ugettext('Uses warp cores') if value == 'yes' else ugettext('Does NOT use warp cores') def _localize_uses_cloaks(value): # Star Trek: New Horizons Mod return ugettext('Uses cloaks') if value == 'yes' else ugettext('Does NOT use cloaks') def _localize_uses_phaser_weapons_any(value): # Star Trek: New Horizons Mod return ugettext('Uses any kind of phaser weapons') if value == 'yes' else ugettext('Does NOT use any kind of phaser weapons') def _localize_uses_disruptor_weapons_any(value): # Star Trek: New Horizons Mod return ugettext('Uses any kind of disruptor weapons') if value == 'yes' else ugettext('Does NOT use any kind of disruptor weapons') def 
_localize_uses_antiproton_weapons_any(value): # Star Trek: New Horizons Mod return ugettext('Uses any kind of antiproton weapons') if value == 'yes' else ugettext('Does NOT use any kind of antiproton weapons') def _localize_uses_tetryon_weapons_any(value): # Star Trek: New Horizons Mod return ugettext('Uses any kind of tetryon weapons') if value == 'yes' else ugettext('Does NOT use any kind of tetryon weapons') def _localize_has_no_factions(value): # Star Trek: New Horizons Mod return ugettext('Does NOT have any factions') if value == 'yes' else ugettext('Has any factions') def _localize_is_machine_cybernetic_empire(value): # Star Trek: New Horizons Mod return ugettext('Is machine cybernetic empire') if value == 'yes' else ugettext('Is NOT machine cybernetic empire') def _localize_has_espionage_agency(value): # Star Trek: New Horizons Mod return ugettext('Has espionage agency') if value == 'yes' else ugettext('Does NOT have espionage agency') def _localize_can_not_use_cloning(value): # Star Trek: New Horizons Mod return ugettext('Can NOT use cloning') if value == 'yes' else ugettext('Can use cloning') def _localize_is_non_humanoid(value): # Star Trek: New Horizons Mod return ugettext('Is non-humanoid') if value == 'yes' else ugettext('Is humanoid') def _localize_is_master_geneticist(value): # Star Trek: New Horizons Mod return ugettext('Is master geneticist') if value == 'yes' else ugettext('Is NOT master geneticist') def _localize_empire_can_not_study_gagarin(value): # Star Trek: New Horizons Mod return ugettext('Empire can NOT study gagarin') if value == 'yes' else ugettext('Empire can study gagarin') def _localize_empire_can_study_psionic_techs(value): # Star Trek: New Horizons Mod return ugettext('Empire can NOT study psionic techs') if value == 'yes' else ugettext('Empire can study psionic techs') def _localize_is_terran_empire(value): # Star Trek: New Horizons Mod return ugettext('Is terran empire') if value == 'yes' else ugettext('Is NOT terran empire') def 
_localize_terran_empire_met_kelpien(value): # Star Trek: New Horizons Mod return ugettext('Has terran empire met kelpien') if value == 'yes' else ugettext('Has NOT terran empire met kelpien') def _localize_empire_met_borg(value): # Star Trek: New Horizons Mod return ugettext('Empire has met borg') if value == 'yes' else ugettext('Empire has NOT met borg') def _localize_is_united_earth(value): # Star Trek: New Horizons Mod return ugettext('Is united earth') if value == 'yes' else ugettext('Is NOT united earth') def _localize_is_non_standard_colonization(value): # Star Trek: New Horizons Mod return ugettext('Is non-standard colonization') if value == 'yes' else ugettext('Is standard colonization') def _localize_has_not_government(value): return ugettext('Does NOT have {}').format(_localize(value)) def _localize_has_not_civic(value): return ugettext('Does NOT have {} civic').format(_localize(value)) def _localize_has_tradition(value): return ugettext('Has {} tradition').format(_localize(value)) def _localize_is_sapient(value): return ugettext('This Species is pre-sapient') if value == 'yes' \ else ugettext('This Species is NOT pre-sapient') def _localize_has_not_seen_any_bypass(value): return ugettext('Has NOT encountered a {}').format(_localize(value)) def _localize_has_seen_any_bypass(value): return ugettext('Has encountered a {}').format(_localize(value)) def _localize_not_owns_any_bypass(value): return ugettext('Does NOT control any system with a {}').format(_localize(value)) def _localize_owns_any_bypass(value): return ugettext('Controls a system with a {}').format(_localize(value)) def _localize_federation(value): federation_perk = _localize(value[0]['has_federation_perk']) technology = _localize(value[1]['any_member'][0]['has_technology']) return [ugettext('Has {} Federation Perk').format(federation_perk), ugettext('Any member has {} technology').format(technology)] def _localize_is_galactic_community_member(value): return ugettext('Is galactic community 
member') if value == 'yes' \ else ugettext('Is NOT galactic community member') def _localize_has_origin(value): return ugettext('Has origin {}').format(_localize(value)) def _localize_is_pacifist(value): return ugettext('Is some degree of Pacifist') if value == 'yes' \ else ugettext('Is NOT some degree of Pacifist') def _localize_is_militarist(value): return ugettext('Is some degree of Militarist') if value == 'yes' \ else ugettext('Is NOT some degree of Militarist') def _localize_is_materialist(value): return ugettext('Is some degree of Materialist') if value == 'yes' \ else ugettext('Is NOT some degree of Materialist') def _localize_is_spiritualist(value): return ugettext('Is some degree of Spiritualist') if value == 'yes' \ else ugettext('Is NOT some degree of Spiritualist') def _localize_is_xenophile(value): return ugettext('Is some degree of Xenophile') if value == 'yes' \ else ugettext('Is NOT some degree of Xenophile') def _localize_is_xenophobe(value): return ugettext('Is some degree of Xenophobe') if value == 'yes' \ else ugettext('Is NOT some degree of Xenophobe') def _localize_is_egalitarian(value): return ugettext('Is some degree of Egalitarian') if value == 'yes' \ else ugettext('Is NOT some degree of Egalitarian') def _localize_is_authoritarian(value): return ugettext('Is some degree of Authoritarian') if value == 'yes' \ else ugettext('Is NOT some degree of Authoritarian') def _localize_count_starbase_sizes(value): starbase_size = _localize(value[0]['starbase_size']) operator, value = _operator_and_value(value[1]['count']) return ugettext('Number of Starbase {} is {} {}').format(starbase_size, operator, value) def _localize_is_machine_empire(value): return ugettext('Is machine empire') if value == 'yes' else ugettext('Is NOT machine empire') def _localize_is_lithoid_empire(value): return ugettext('Is lithoid empire') if value == 'yes' else ugettext('Is NOT lithoid empire') def _localize_num_districts(value): district_type_key = value[0].get('type') 
district_type = _localize(district_type_key) operator, value = _operator_and_value(value[1].get('value')) return ugettext('Number of {} districts is {} {}').format(district_type, operator, value) def _localize_has_deposit(value): deposit = _localize(value) return ugettext('Has deposit {}').format(deposit) def _localize_has_not_ancrel(value): return ugettext('Does NOT have any ancrel') if value == 'yes' else ugettext('Has ancrel') def _localize_always(value): return ugettext('Always') if value == 'yes' else ugettext('Never') def _localize_and(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('All of the following'): parsed_values} def _localize_or(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('Any of the following'): parsed_values} def _localize_nor(values): parsed_values = [_parse_condition(value) for value in values] return {ugettext('None of the following'): parsed_values} def _localize_not(value): key = list(value[0])[0] nested_value = value[0][key] if key == 'OR': # Redirect to localization of NOR: negation = _parse_condition({'NOR': nested_value}) else: negated_key = key.replace('has_', 'has_not_') if 'has_' in key \ else 'not_' + key negated_condition = {negated_key: value[0][key]} negation = _parse_condition(negated_condition) return negation def _operator_and_value(data): if type(data) is int: operator = ugettext('equal to') value = data elif type(data) is dict: operator = { '>': ugettext('greater than'), '<': ugettext('less than'), '>=': ugettext('greater than or equal to'), '<=': ugettext('less than or equal to') }[list(data)[0]] value = next(iter(data.values())) return operator, value
gpl-2.0
NolanZhao/OpenBazaar
dht/kbucket.py
13
11478
""" Collection of KBucket classes storing contacts for RoutingTables. Classes: KBucket -- A classic k-bucket. CachingKBucket -- A KBucket with a replacement cache. Exceptions: FullBucketError -- Raised when adding a contact to a full KBucket. """ import collections import functools from dht import constants, util class FullBucketError(Exception): """Raised when attempting to add a contact in a full KBucket.""" pass class KBucket(collections.Sequence): """A simple k-bucket for a Kademlia routing table.""" def __init__(self, range_min, range_max): """ Make a new KBucket with the specified range. Args: range_min, range_max: The lower and upper limits for the range in the ID space covered by this KBucket, as integers. This is a half-open range: [range_min, range_max) """ self.last_accessed = util.now() self.range_min = range_min self.range_max = range_max # This list of contacts implements an LRU-protocol: # Fresh or updated contacts are near the tail of the list, # while stale contacts are near the head. self._contacts = [] # pylint: disable=no-self-argument # pylint: disable=not-callable def _touch(func): """ Update the `last_accessed` attribute upon method invocation. """ @functools.wraps(func) def updating_method(self, *args, **kwargs): self.last_accessed = util.now() return func(self, *args, **kwargs) return updating_method def __getitem__(self, key): return self._contacts[key] def __len__(self): return len(self._contacts) @_touch def add_contact(self, contact): """ Add a contact to the contact list. The new contact is always appended to the contact list after removing any prior occurences of the same contact. This is the intended behaviour; the fresh contact may have updated add-on data (e.g. optimization-specific stuff). Args: contact: The contact to add, as a contact.Contact. Raises: FullBucketError: The bucket is full and the contact to add is not already in it. """ assert self.contact_in_range(contact), 'Wrong KBucket.' 
try: self._contacts.remove(contact) except ValueError: pass if len(self._contacts) < constants.K: self._contacts.append(contact) else: raise FullBucketError('No space in bucket to insert contact') @_touch def get_contact(self, guid): """ Return the contact with the specified guid or None if not present. Args: guid: The guid to search for, as a string or a unicode, in hexadecimal. Returns: A contact.Contact with the given guid or None """ for contact in self._contacts: if contact.guid == guid: return contact return None @_touch def get_contacts(self, count=-1, excluded_guid=None): """ Return a list of contacts from the KBucket. Args: count: The amount of contacts to return, as an int; if negative, return all contacts. excluded_guid: A guid to exclude, as a string or unicode; if a contact with this guid is in the list of returned values, it will be discarded before returning. Returns: List of (at most) `count` contacts from the contact list. This amount is capped by the available contacts and the bucket size, of course. Newer contacts are preferred. If no contacts are present, an empty list is returned. """ current_len = len(self._contacts) if current_len == 0 or count == 0: return [] if count < 0: count = current_len else: count = min(count, current_len) if excluded_guid is None: # Get the last `count` contacts. contact_list = self._contacts[-count:] else: contact_list = [] for contact in reversed(self._contacts): if contact.guid == excluded_guid: continue contact_list.append(contact) if len(contact_list) >= count: break return contact_list @_touch def remove_contact(self, contact): """ Remove given contact from contact list. Args: contact: The contact to remove, as a contact.Contact. If no such contact exists, do nothing. """ try: self._contacts.remove(contact) except ValueError: pass @_touch def remove_guid(self, guid): """ Remove contact with given guid from contact list. 
Args: guid: The guid of the contact that we want removed, as a string or unicode, in hexadecimal. If no such contact exists, do nothing. """ self._contacts = [ contact for contact in self._contacts if contact.guid != guid ] def split_kbucket(self): """ Split the high half of this KBucket's range and assign it to a new KBucket. Relocate all relevant contacts to the new KBucket. Note: If multiple threads attempt to split the same KBucket, data corruption may occur. Returns: The new KBucket, which covers the high part of the halved ID space. """ cur_range_size = self.range_max - self.range_min half_point = self.range_min + cur_range_size // 2 # Ensure no empty range is created. assert self.range_min < half_point < self.range_max # Make the instantiation dependent on the actual class, # for easy inheritance. new_kbucket = self.__class__(half_point, self.range_max) # Halve the ID space of the split KBucket. self.range_max = half_point # Split the contact list into two, according to the new ranges. self._contacts, new_kbucket._contacts = util.partition( self._contacts, self.contact_in_range ) return new_kbucket def contact_in_range(self, contact): """ Test whether the given contact is in the range of the ID space covered by this KBucket. Args: contact: The contact to test, as contact.Contact Returns: True if `contact` is in this KBucket's range, False otherwise. """ return self.guid_in_range(contact.guid) def guid_in_range(self, guid): """ Test whether the given guid is in the range of the ID space covered by this KBucket. Args: guid: The guid to test, as a string or unicode, in hexadecimal. Returns: True if `guid` is in this KBucket's range, False otherwise. 
""" return self.range_min <= util.guid_to_num(guid) < self.range_max def is_stale(self): return util.now() - self.last_accessed >= constants.REFRESH_TIMEOUT class CachingKBucket(KBucket): """A KBucket with a replacement cache.""" def __init__(self, range_min, range_max): super(CachingKBucket, self).__init__(range_min, range_max) # Cache containing nodes eligible to replace stale entries. # Entries at the tail (right) of the cache are preferred # than entries at the head (left). self._replacement_cache = collections.deque() # pylint: disable=no-self-argument # pylint: disable=not-callable def _touch(func): """ Update the `last_accessed` attribute upon method invocation. """ @functools.wraps(func) def updating_method(self, *args, **kwargs): self.last_accessed = util.now() return func(self, *args, **kwargs) return updating_method @_touch def cache_contact(self, contact): """ Store a contact in the KBucket's replacement cache. Evict any existing contact with the same guid. Args: contact: The contact to cache, as a contact.Contact If the cache is full, `contact` will replace the oldest contact in the cache. """ try: self._replacement_cache.remove(contact) except ValueError: pass self._replacement_cache.append(contact) if len(self._replacement_cache) > constants.CACHE_K: self._replacement_cache.popleft() def get_cached_contacts(self): """ Return all contacts in cache. Returns: A list of all cached contacts, oldest first. """ return list(self._replacement_cache) def remove_contact(self, contact): """ Remove given contact from contact list. Args: contact: The contact to remove, as a contact.Contact. If no such contact exists, do nothing. In any case, refill the main list from the cache. """ super(CachingKBucket, self).remove_contact(contact) self.fill_from_cache() def remove_guid(self, guid): """ Remove contact with given guid from contact list. Args: guid: The guid of the contact that we want removed, as a string or unicode. If no such contact exists, do nothing. 
In any case, refill the main list from the cache. """ super(CachingKBucket, self).remove_guid(guid) self.fill_from_cache() def split_kbucket(self): """ Split the high half of this KBucket's range and assign it to a new KBucket. Relocate all relevant contacts to the new KBucket. Note: If multiple threads attempt to split the same KBucket, the operation may cause data corruption. Returns: The new KBucket, which covers the high part of the halved ID space. In addition to splitting the contacts, this method also splits the cache of the existing bucket, according to the guids. Then, it refills the contact lists from the caches. """ new_kbucket = super(CachingKBucket, self).split_kbucket() cache_self, cache_new = util.partition( self._replacement_cache, self.contact_in_range ) # Replacement caches are deques, so we can't directly assign # the values returned by partition. new_kbucket._replacement_cache.extend(cache_new) self._replacement_cache.clear() self._replacement_cache.extend(cache_self) self.fill_from_cache() new_kbucket.fill_from_cache() return new_kbucket @_touch def fill_from_cache(self): """ Move contacts from the cache to the main list, until the cache is exhausted or the main list is full. """ move_count = min( len(self._replacement_cache), constants.K - len(self._contacts) ) for _ in range(move_count): self.add_contact(self._replacement_cache.pop())
mit
ResearchSoftwareInstitute/MyHPOM
hs_access_control/migrations/0012_auto_disallow_nulls.py
3
3853
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# NOTE: `models` is unused but kept — this is auto-generated Django migration
# boilerplate and should not be hand-edited beyond comments.
from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):
    # Auto-generated schema migration for the access-control app.
    # Re-declares every relation field on the group/user access and
    # privilege models so the columns become NOT NULL (see file name:
    # 0012_auto_disallow_nulls) after 0011 renamed the fields back to
    # their original names.

    dependencies = [
        ('hs_access_control', '0011_auto_rename_new_fields_to_original_names'),
    ]

    operations = [
        # One-to-one access records: each Group / BaseResource / User gets
        # exactly one *access object carrying its protection state.
        migrations.AlterField(
            model_name='groupaccess',
            name='group',
            field=models.OneToOneField(related_query_name=b'gaccess',
                                       related_name='gaccess',
                                       editable=False, to='auth.Group',
                                       help_text=b'group object that this object protects'),
            preserve_default=True,
        ),
        # Group-over-resource privilege triples: grantor, group, resource.
        migrations.AlterField(
            model_name='groupresourceprivilege',
            name='grantor',
            field=models.ForeignKey(related_name='x2grp', editable=False,
                                    to=settings.AUTH_USER_MODEL,
                                    help_text=b'grantor of privilege'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='groupresourceprivilege',
            name='group',
            field=models.ForeignKey(related_name='g2grp', editable=False,
                                    to='auth.Group',
                                    help_text=b'group to be granted privilege'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='groupresourceprivilege',
            name='resource',
            field=models.ForeignKey(related_name='r2grp', editable=False,
                                    to='hs_core.BaseResource',
                                    help_text=b'resource to which privilege applies'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='resourceaccess',
            name='resource',
            field=models.OneToOneField(related_query_name=b'raccess',
                                       related_name='raccess',
                                       editable=False,
                                       to='hs_core.BaseResource'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='useraccess',
            name='user',
            field=models.OneToOneField(related_query_name=b'uaccess',
                                       related_name='uaccess',
                                       editable=False,
                                       to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
        # User-over-group privilege triples: grantor, group, user.
        migrations.AlterField(
            model_name='usergroupprivilege',
            name='grantor',
            field=models.ForeignKey(related_name='x2ugp', editable=False,
                                    to=settings.AUTH_USER_MODEL,
                                    help_text=b'grantor of privilege'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='usergroupprivilege',
            name='group',
            field=models.ForeignKey(related_name='g2ugp', editable=False,
                                    to='auth.Group',
                                    help_text=b'group to which privilege applies'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='usergroupprivilege',
            name='user',
            field=models.ForeignKey(related_name='u2ugp', editable=False,
                                    to=settings.AUTH_USER_MODEL,
                                    help_text=b'user to be granted privilege'),
            preserve_default=True,
        ),
        # User-over-resource privilege triples: grantor, resource, user.
        migrations.AlterField(
            model_name='userresourceprivilege',
            name='grantor',
            field=models.ForeignKey(related_name='x2urp', editable=False,
                                    to=settings.AUTH_USER_MODEL,
                                    help_text=b'grantor of privilege'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='userresourceprivilege',
            name='resource',
            field=models.ForeignKey(related_name='r2urp', editable=False,
                                    to='hs_core.BaseResource',
                                    help_text=b'resource to which privilege applies'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='userresourceprivilege',
            name='user',
            field=models.ForeignKey(related_name='u2urp', editable=False,
                                    to=settings.AUTH_USER_MODEL,
                                    help_text=b'user to be granted privilege'),
            preserve_default=True,
        ),
    ]
bsd-3-clause
philsch/ansible
lib/ansible/modules/cloud/rackspace/rax_clb_nodes.py
66
8727
#!/usr/bin/python

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: rax_clb_nodes
short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
  - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
version_added: "1.4"
options:
  address:
    required: false
    description:
      - IP address or domain name of the node
  condition:
    required: false
    choices:
      - enabled
      - disabled
      - draining
    description:
      - Condition for the node, which determines its role within the load
        balancer
  load_balancer_id:
    required: true
    description:
      - Load balancer id
  node_id:
    required: false
    description:
      - Node id
  port:
    required: false
    description:
      - Port number of the load balanced service on the node
  state:
    required: false
    default: "present"
    choices:
      - present
      - absent
    description:
      - Indicate desired state of the node
  type:
    required: false
    choices:
      - primary
      - secondary
    description:
      - Type of node
  wait:
    required: false
    default: "no"
    choices:
      - "yes"
      - "no"
    description:
      - Wait for the load balancer to become active before returning
  wait_timeout:
    required: false
    default: 30
    description:
      - How long to wait before giving up and returning an error
  weight:
    required: false
    description:
      - Weight of node
author: "Lukasz Kawczynski (@neuroid)"
extends_documentation_fragment: rackspace
'''

EXAMPLES = '''
# Add a new node to the load balancer
- local_action:
    module: rax_clb_nodes
    load_balancer_id: 71
    address: 10.2.2.3
    port: 80
    condition: enabled
    type: primary
    wait: yes
    credentials: /path/to/credentials

# Drain connections from a node
- local_action:
    module: rax_clb_nodes
    load_balancer_id: 71
    node_id: 410
    condition: draining
    wait: yes
    credentials: /path/to/credentials

# Remove a node from the load balancer
- local_action:
    module: rax_clb_nodes
    load_balancer_id: 71
    node_id: 410
    state: absent
    wait: yes
    credentials: /path/to/credentials
'''

try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False


def _activate_virtualenv(path):
    """Activate the virtualenv at `path` inside the current interpreter.

    Uses the virtualenv-provided ``activate_this.py`` hook; raises IOError
    if the hook does not exist (caller turns that into fail_json).
    """
    path = os.path.expanduser(path)
    activate_this = os.path.join(path, 'bin', 'activate_this.py')
    execfile(activate_this, dict(__file__=activate_this))


def _get_node(lb, node_id=None, address=None, port=None):
    """Return the first node of `lb` matching ALL given criteria.

    Criteria that are None are ignored; returns None when nothing matches
    or when no criteria were supplied at all.
    """
    for node in getattr(lb, 'nodes', []):
        match_list = []
        if node_id is not None:
            match_list.append(getattr(node, 'id', None) == node_id)
        if address is not None:
            match_list.append(getattr(node, 'address', None) == address)
        if port is not None:
            match_list.append(getattr(node, 'port', None) == port)

        if match_list and all(match_list):
            return node

    return None


def main():
    """Module entry point: ensure the requested node state on the CLB."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            address=dict(),
            condition=dict(choices=['enabled', 'disabled', 'draining']),
            load_balancer_id=dict(required=True, type='int'),
            node_id=dict(type='int'),
            port=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            type=dict(choices=['primary', 'secondary']),
            virtualenv=dict(),
            wait=dict(default=False, type='bool'),
            wait_timeout=dict(default=30, type='int'),
            weight=dict(type='int'),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    address = module.params['address']
    # The CLB API expects upper-cased condition/type values.
    condition = (module.params['condition'] and
                 module.params['condition'].upper())
    load_balancer_id = module.params['load_balancer_id']
    node_id = module.params['node_id']
    port = module.params['port']
    state = module.params['state']
    typ = module.params['type'] and module.params['type'].upper()
    virtualenv = module.params['virtualenv']
    wait = module.params['wait']
    # Guard against a 0 timeout, which would make wait_until spin forever.
    wait_timeout = module.params['wait_timeout'] or 1
    weight = module.params['weight']

    if virtualenv:
        try:
            _activate_virtualenv(virtualenv)
        except IOError as e:
            module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
                virtualenv, e))

    setup_rax_module(module, pyrax)

    if not pyrax.cloud_loadbalancers:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    try:
        lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
    except pyrax.exc.PyraxException as e:
        module.fail_json(msg='%s' % e.message)

    node = _get_node(lb, node_id, address, port)

    result = rax_clb_node_to_dict(node)

    if state == 'absent':
        if not node:  # Removing a non-existent node
            module.exit_json(changed=False, state=state)
        try:
            lb.delete_node(node)
            result = {}
        except pyrax.exc.NotFound:
            module.exit_json(changed=False, state=state)
        except pyrax.exc.PyraxException as e:
            module.fail_json(msg='%s' % e.message)
    else:  # present
        if not node:
            if node_id:  # Updating a non-existent node
                msg = 'Node %d not found' % node_id
                if lb.nodes:
                    msg += (' (available nodes: %s)' %
                            ', '.join([str(x.id) for x in lb.nodes]))
                module.fail_json(msg=msg)
            else:  # Creating a new node
                try:
                    node = pyrax.cloudloadbalancers.Node(
                        address=address, port=port, condition=condition,
                        weight=weight, type=typ)
                    resp, body = lb.add_nodes([node])
                    result.update(body['nodes'][0])
                except pyrax.exc.PyraxException as e:
                    module.fail_json(msg='%s' % e.message)
        else:  # Updating an existing node
            mutable = {
                'condition': condition,
                'type': typ,
                'weight': weight,
            }

            # Keep only the attributes that were supplied AND actually
            # differ from the node's current value.  (The original popped
            # from `mutable` while iterating mutable.items(), which only
            # worked on Python 2 where items() returns a list; on Python 3
            # it raises "dictionary changed size during iteration".)
            mutable = dict(
                (name, value) for name, value in mutable.items()
                if value is not None and value != getattr(node, name)
            )

            if not mutable:
                module.exit_json(changed=False, state=state, node=result)

            try:
                # The diff has to be set explicitly to update node's weight and
                # type; this should probably be fixed in pyrax
                lb.update_node(node, diff=mutable)
                result.update(mutable)
            except pyrax.exc.PyraxException as e:
                module.fail_json(msg='%s' % e.message)

    if wait:
        pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
                               attempts=wait_timeout)
        if lb.status != 'ACTIVE':
            module.fail_json(
                msg='Load balancer not active after %ds (current status: %s)' %
                    (wait_timeout, lb.status.lower()))

    kwargs = {'node': result} if result else {}
    module.exit_json(changed=True, state=state, **kwargs)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *

# invoke the module
if __name__ == '__main__':
    main()
gpl-3.0
napkindrawing/ansible
lib/ansible/plugins/lookup/ini.py
26
4165
# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re

from collections import MutableSequence
from io import StringIO

from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.lookup import LookupBase


def _parse_params(term):
    '''Safely split parameter term to preserve spaces'''

    # Accumulate words into the last seen "key=" bucket so that values
    # containing spaces (e.g. section=My Section) survive the split.
    keys = ['key', 'type', 'section', 'file', 're', 'default']
    params = {}
    for k in keys:
        params[k] = ''

    thiskey = 'key'
    for idp, phrase in enumerate(term.split()):
        for k in keys:
            if ('%s=' % k) in phrase:
                thiskey = k
        if idp == 0 or not params[thiskey]:
            params[thiskey] = phrase
        else:
            params[thiskey] += ' ' + phrase

    rparams = [params[x] for x in keys if params[x]]
    return rparams


class LookupModule(LookupBase):

    def read_properties(self, filename, key, dflt, is_regexp):
        '''Load a java-properties file and return the value(s) for `key`.

        The file is wrapped in a synthetic [java_properties] section so
        configparser can parse it.
        '''
        config = StringIO()
        # Close the file deterministically (the original leaked the handle).
        with open(to_bytes(filename, errors='surrogate_or_strict'),
                  'rb') as current_cfg_file:
            config.write(u'[java_properties]\n' +
                         to_text(current_cfg_file.read(),
                                 errors='surrogate_or_strict'))
        config.seek(0, os.SEEK_SET)
        self.cp.readfp(config)
        return self.get_value(key, 'java_properties', dflt, is_regexp)

    def read_ini(self, filename, key, section, dflt, is_regexp):
        '''Load an INI file and return the value(s) for `key` in `section`.'''
        # Close the file deterministically (the original leaked the handle).
        with open(to_bytes(filename, errors='surrogate_or_strict')) as ini_file:
            self.cp.readfp(ini_file)
        return self.get_value(key, section, dflt, is_regexp)

    def get_value(self, key, section, dflt, is_regexp):
        '''Return the value for `key`, a regexp match list, or `dflt`.'''
        # Retrieve all values from a section using a regexp
        if is_regexp:
            return [v for k, v in self.cp.items(section) if re.match(key, k)]
        value = None
        # Retrieve a single value
        try:
            value = self.cp.get(section, key)
        except configparser.NoOptionError:
            return dflt
        return value

    def run(self, terms, variables=None, **kwargs):
        '''Lookup entry point: one result per term.'''
        basedir = self.get_basedir(variables)
        self.basedir = basedir
        self.cp = configparser.ConfigParser()

        ret = []
        for term in terms:
            params = _parse_params(term)
            key = params[0]

            paramvals = {
                'file': 'ansible.ini',
                're': False,
                'default': None,
                'section': "global",
                'type': "ini",
            }

            # parameters specified?
            try:
                for param in params[1:]:
                    name, value = param.split('=')
                    assert(name in paramvals)
                    paramvals[name] = value
            except (ValueError, AssertionError) as e:
                # Unknown parameter name or malformed name=value pair.
                raise AnsibleError(e)

            path = self.find_file_in_search_path(variables, 'files',
                                                 paramvals['file'])
            if paramvals['type'] == "properties":
                var = self.read_properties(path, key, paramvals['default'],
                                           paramvals['re'])
            else:
                var = self.read_ini(path, key, paramvals['section'],
                                    paramvals['default'], paramvals['re'])
            if var is not None:
                if isinstance(var, MutableSequence):
                    for v in var:
                        ret.append(v)
                else:
                    ret.append(var)
        return ret
gpl-3.0
GladeRom/android_external_chromium_org
chrome/common/extensions/docs/server2/mock_function_test.py
122
1207
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from mock_function import MockFunction class MockFunctionUnittest(unittest.TestCase): def testMockFunction(self): @MockFunction def calc(a, b, mult=1): return (a + b) * mult self.assertTrue(*calc.CheckAndReset(0)) self.assertEqual( (False, 'calc: expected 1 call(s), got 0'), calc.CheckAndReset(1)) self.assertEqual(20, calc(2, 3, mult=4)) self.assertTrue(*calc.CheckAndReset(1)) self.assertTrue(*calc.CheckAndReset(0)) self.assertEqual(20, calc(2, 3, mult=4)) self.assertEqual( (False, 'calc: expected 0 call(s), got 1'), calc.CheckAndReset(0)) self.assertEqual(3, calc(1, 2)) self.assertEqual(0, calc(3, 4, mult=0)) self.assertTrue(*calc.CheckAndReset(2)) self.assertTrue(*calc.CheckAndReset(0)) self.assertEqual(3, calc(1, 2)) self.assertEqual(0, calc(3, 4, mult=0)) self.assertEqual( (False, 'calc: expected 3 call(s), got 2'), calc.CheckAndReset(3)) if __name__ == '__main__': unittest.main()
bsd-3-clause
israeleriston/scientific-week
backend/venv/lib/python3.5/site-packages/click/_unicodefun.py
136
4204
import os
import sys
import codecs

from ._compat import PY2


# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
click = sys.modules[__name__.rsplit('.', 1)[0]]


def _find_unicode_literals_frame():
    """Return the stacklevel of the first non-click caller frame whose
    module was compiled with ``unicode_literals``, or 0 if none is found.
    """
    import __future__
    frm = sys._getframe(1)
    idx = 1
    while frm is not None:
        # Skip click's own frames so the warning points at user code.
        if frm.f_globals.get('__name__', '').startswith('click.'):
            frm = frm.f_back
            idx += 1
        elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
            return idx
        else:
            break
    return 0


def _check_for_unicode_literals():
    """Warn (on Python 2 only) when the caller uses unicode_literals."""
    if not __debug__:
        return
    if not PY2 or click.disable_unicode_literals_warning:
        return
    bad_frame = _find_unicode_literals_frame()
    if bad_frame <= 0:
        return
    from warnings import warn
    warn(Warning('Click detected the use of the unicode_literals '
                 '__future__ import. This is heavily discouraged '
                 'because it can introduce subtle bugs in your '
                 'code. You should instead use explicit u"" literals '
                 'for your unicode strings. For more information see '
                 'http://click.pocoo.org/python3/'),
         stacklevel=bad_frame)


def _verify_python3_env():
    """Ensures that the environment is good for unicode on Python 3."""
    if PY2:
        return
    try:
        import locale
        fs_enc = codecs.lookup(locale.getpreferredencoding()).name
    except Exception:
        fs_enc = 'ascii'
    if fs_enc != 'ascii':
        return

    extra = ''
    if os.name == 'posix':
        import subprocess
        rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE).communicate()[0]
        good_locales = set()
        has_c_utf8 = False

        # Make sure we're operating on text here.
        if isinstance(rv, bytes):
            rv = rv.decode('ascii', 'replace')

        # Renamed from `locale` to avoid shadowing the locale module
        # imported above in this same function.
        for line in rv.splitlines():
            loc_name = line.strip()
            if loc_name.lower().endswith(('.utf-8', '.utf8')):
                good_locales.add(loc_name)
                if loc_name.lower() in ('c.utf8', 'c.utf-8'):
                    has_c_utf8 = True

        extra += '\n\n'
        if not good_locales:
            extra += (
                'Additional information: on this system no suitable UTF-8\n'
                'locales were discovered. This most likely requires resolving\n'
                'by reconfiguring the locale system.'
            )
        elif has_c_utf8:
            extra += (
                'This system supports the C.UTF-8 locale which is recommended.\n'
                'You might be able to resolve your issue by exporting the\n'
                'following environment variables:\n\n'
                '    export LC_ALL=C.UTF-8\n'
                '    export LANG=C.UTF-8'
            )
        else:
            # Typo fix: "where" -> "were".
            extra += (
                'This system lists a couple of UTF-8 supporting locales that\n'
                'you can pick from. The following suitable locales were\n'
                'discovered: %s'
            ) % ', '.join(sorted(good_locales))

        # LC_ALL takes precedence over LANG, hence the early break on the
        # first variable that is actually set.
        bad_locale = None
        for env_locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
            if env_locale and env_locale.lower().endswith(('.utf-8', '.utf8')):
                bad_locale = env_locale
            if env_locale is not None:
                break
        if bad_locale is not None:
            extra += (
                '\n\nClick discovered that you exported a UTF-8 locale\n'
                'but the locale system could not pick up from it because\n'
                'it does not exist. The exported locale is "%s" but it\n'
                'is not supported'
            ) % bad_locale

    # Message fix: the original concatenated '...python3/' directly with
    # 'for mitigation steps.', producing a corrupted URL in the error text.
    raise RuntimeError('Click will abort further execution because Python 3 '
                       'was configured to use ASCII as encoding for the '
                       'environment. Consult http://click.pocoo.org/python3/ '
                       'for mitigation steps.' + extra)
mit
JuliBakagianni/CEF-ELRC
lib/python2.7/site-packages/django/utils/html.py
202
8105
"""HTML utilities suitable for global use.""" import re import string from django.utils.safestring import SafeData, mark_safe from django.utils.encoding import force_unicode from django.utils.functional import allow_lazy from django.utils.http import urlquote # Configuration for urlize() function. LEADING_PUNCTUATION = ['(', '<', '&lt;'] TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;'] # List of possible strings used for bullets in bulleted lists. DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;'] unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)') word_split_re = re.compile(r'(\s+)') punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \ ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]), '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION]))) simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$') link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+') html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE) hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL) trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z') del x # Temporary variable def escape(html): """ Returns the given HTML with ampersands, quotes and angle brackets encoded. """ return mark_safe(force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')) escape = allow_lazy(escape, unicode) _base_js_escapes = ( ('\\', r'\u005C'), ('\'', r'\u0027'), ('"', r'\u0022'), ('>', r'\u003E'), ('<', r'\u003C'), ('&', r'\u0026'), ('=', r'\u003D'), ('-', r'\u002D'), (';', r'\u003B'), (u'\u2028', r'\u2028'), (u'\u2029', r'\u2029') ) # Escape every ASCII character with a value less than 32. 
_js_escapes = (_base_js_escapes + tuple([('%c' % z, '\\u%04X' % z) for z in range(32)])) def escapejs(value): """Hex encodes characters for use in JavaScript strings.""" for bad, good in _js_escapes: value = mark_safe(force_unicode(value).replace(bad, good)) return value escapejs = allow_lazy(escapejs, unicode) def conditional_escape(html): """ Similar to escape(), except that it doesn't operate on pre-escaped strings. """ if isinstance(html, SafeData): return html else: return escape(html) def linebreaks(value, autoescape=False): """Converts newlines into <p> and <br />s.""" value = re.sub(r'\r\n|\r|\n', '\n', force_unicode(value)) # normalize newlines paras = re.split('\n{2,}', value) if autoescape: paras = [u'<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras] else: paras = [u'<p>%s</p>' % p.replace('\n', '<br />') for p in paras] return u'\n\n'.join(paras) linebreaks = allow_lazy(linebreaks, unicode) def strip_tags(value): """Returns the given HTML with all tags stripped.""" return re.sub(r'<[^>]*?>', '', force_unicode(value)) strip_tags = allow_lazy(strip_tags) def strip_spaces_between_tags(value): """Returns the given HTML with spaces between tags removed.""" return re.sub(r'>\s+<', '><', force_unicode(value)) strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, unicode) def strip_entities(value): """Returns the given HTML with all entities (&something;) stripped.""" return re.sub(r'&(?:\w+|#\d+);', '', force_unicode(value)) strip_entities = allow_lazy(strip_entities, unicode) def fix_ampersands(value): """Returns the given HTML with all unencoded ampersands encoded correctly.""" return unencoded_ampersands_re.sub('&amp;', force_unicode(value)) fix_ampersands = allow_lazy(fix_ampersands, unicode) def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False): """ Converts any URLs in text into clickable links. Works on http://, https://, www. links and links ending in .org, .net or .com. 
Links can have trailing punctuation (periods, commas, close-parens) and leading punctuation (opening parens) and it'll still do the right thing. If trim_url_limit is not None, the URLs in link text longer than this limit will truncated to trim_url_limit-3 characters and appended with an elipsis. If nofollow is True, the URLs in link text will get a rel="nofollow" attribute. If autoescape is True, the link text and URLs will get autoescaped. """ trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x safe_input = isinstance(text, SafeData) words = word_split_re.split(force_unicode(text)) nofollow_attr = nofollow and ' rel="nofollow"' or '' for i, word in enumerate(words): match = None if '.' in word or '@' in word or ':' in word: match = punctuation_re.match(word) if match: lead, middle, trail = match.groups() # Make URL we want to point to. url = None if middle.startswith('http://') or middle.startswith('https://'): url = urlquote(middle, safe='/&=:;#?+*') elif middle.startswith('www.') or ('@' not in middle and \ middle and middle[0] in string.ascii_letters + string.digits and \ (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))): url = urlquote('http://%s' % middle, safe='/&=:;#?+*') elif '@' in middle and not ':' in middle and simple_email_re.match(middle): url = 'mailto:%s' % middle nofollow_attr = '' # Make link. if url: trimmed = trim_url(middle) if autoescape and not safe_input: lead, trail = escape(lead), escape(trail) url, trimmed = escape(url), escape(trimmed) middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed) words[i] = mark_safe('%s%s%s' % (lead, middle, trail)) else: if safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) elif safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) return u''.join(words) urlize = allow_lazy(urlize, unicode) def clean_html(text): """ Clean the given HTML. 
Specifically, do the following: * Convert <b> and <i> to <strong> and <em>. * Encode all ampersands correctly. * Remove all "target" attributes from <a> tags. * Remove extraneous HTML, such as presentational tags that open and immediately close and <br clear="all">. * Convert hard-coded bullets into HTML unordered lists. * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom of the text. """ from django.utils.text import normalize_newlines text = normalize_newlines(force_unicode(text)) text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text) text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text) text = fix_ampersands(text) # Remove all target="" attributes from <a> tags. text = link_target_attribute_re.sub('\\1', text) # Trim stupid HTML such as <br clear="all">. text = html_gunk_re.sub('', text) # Convert hard-coded bullets into HTML unordered lists. def replace_p_tags(match): s = match.group().replace('</p>', '</li>') for d in DOTS: s = s.replace('<p>%s' % d, '<li>') return u'<ul>\n%s\n</ul>' % s text = hard_coded_bullets_re.sub(replace_p_tags, text) # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom # of the text. text = trailing_empty_content_re.sub('', text) return text clean_html = allow_lazy(clean_html, unicode)
bsd-3-clause
chemelnucfin/tensorflow
tensorflow/contrib/boosted_trees/estimator_batch/estimator_utils.py
5
2899
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for converting between core and contrib feature columns."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as contrib_model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output

# Core and contrib use distinct ModeKeys enums; note core PREDICT maps to
# contrib INFER.
_CORE_MODE_TO_CONTRIB_MODE_ = {
    model_fn_lib.ModeKeys.TRAIN: contrib_model_fn_lib.ModeKeys.TRAIN,
    model_fn_lib.ModeKeys.EVAL: contrib_model_fn_lib.ModeKeys.EVAL,
    model_fn_lib.ModeKeys.PREDICT: contrib_model_fn_lib.ModeKeys.INFER
}


def _core_mode_to_contrib_mode(mode):
  # Raises KeyError for any mode not in the table above.
  return _CORE_MODE_TO_CONTRIB_MODE_[mode]


def _export_outputs_to_output_alternatives(export_outputs):
  """Converts EstimatorSpec.export_outputs to output_alternatives.

  Args:
    export_outputs: export_outputs created by create_estimator_spec.
  Returns:
    converted output_alternatives.
  """
  output = {}
  if export_outputs is not None:
    # Only ClassificationOutput entries are converted; other export_output
    # types (e.g. RegressionOutput) are silently dropped from the result.
    for key, value in export_outputs.items():
      if isinstance(value, export_output.ClassificationOutput):
        exported_predictions = {
            prediction_key.PredictionKey.SCORES: value.scores,
            prediction_key.PredictionKey.CLASSES: value.classes
        }
        output[key] = (constants.ProblemType.CLASSIFICATION,
                       exported_predictions)
    return output
  # export_outputs was None: fall through to None (not an empty dict).
  return None


def estimator_spec_to_model_fn_ops(estimator_spec, export_alternatives=False):
  """Wraps a core EstimatorSpec into a contrib ModelFnOps.

  Args:
    estimator_spec: a core `EstimatorSpec`.
    export_alternatives: if True, also convert export_outputs into
      contrib-style output_alternatives.
  Returns:
    An equivalent `model_fn.ModelFnOps`.
  """
  if export_alternatives:
    alternatives = _export_outputs_to_output_alternatives(
        estimator_spec.export_outputs)
  else:
    # NOTE(review): this branch yields [] while the converter above yields a
    # dict or None — presumably ModelFnOps treats any falsy value the same,
    # but confirm before relying on the type.
    alternatives = []

  return model_fn.ModelFnOps(
      mode=_core_mode_to_contrib_mode(estimator_spec.mode),
      predictions=estimator_spec.predictions,
      loss=estimator_spec.loss,
      train_op=estimator_spec.train_op,
      eval_metric_ops=estimator_spec.eval_metric_ops,
      output_alternatives=alternatives)
apache-2.0
tfroehlich82/EventGhost
eg/Classes/AddActionGroupDialog.py
2
1500
# -*- coding: utf-8 -*- # # This file is part of EventGhost. # Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/> # # EventGhost is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 2 of the License, or (at your option) # any later version. # # EventGhost is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along # with EventGhost. If not, see <http://www.gnu.org/licenses/>. # Local imports import eg class Text(eg.TranslatableStrings): caption = "Add Actions?" message = ( "EventGhost can add a folder with all actions of this plugin to your " "configuration tree. If you want to do so, select the location where " "it should be added and press OK.\n\n" "Otherwise press the cancel button." ) class AddActionGroupDialog(eg.TreeItemBrowseDialog): def Configure(self, parent=None): eg.TreeItemBrowseDialog.Configure( self, Text.caption, Text.message, searchItem=None, resultClasses=(eg.FolderItem, eg.RootItem), filterClasses=(eg.FolderItem, ), parent=parent, )
gpl-2.0
affansyed/bcc
tools/old/opensnoop.py
9
2737
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# opensnoop    Trace open() syscalls.
#              For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: opensnoop [-h] [-t] [-x] [-p PID]
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 17-Sep-2015   Brendan Gregg   Created this.

from __future__ import print_function
from bcc import BPF
import argparse

# arguments
examples = """examples:
    ./opensnoop           # trace all open() syscalls
    ./opensnoop -t        # include timestamps
    ./opensnoop -x        # only show failed opens
    ./opensnoop -p 181    # only trace PID 181
"""
parser = argparse.ArgumentParser(
    description="Trace open() syscalls",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
    help="include timestamp on output")
parser.add_argument("-x", "--failed", action="store_true",
    help="only show failed opens")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
args = parser.parse_args()
debug = 0

# define BPF program
# The kprobe stashes the filename pointer keyed by PID so the kretprobe
# can pair it with the syscall's return value; FILTER is replaced below
# with an optional PID check.
bpf_text = """
#include <uapi/linux/ptrace.h>

BPF_HASH(args_filename, u32, const char *);

int kprobe__sys_open(struct pt_regs *ctx, const char __user *filename)
{
    u32 pid = bpf_get_current_pid_tgid();

    FILTER
    args_filename.update(&pid, &filename);

    return 0;
};

int kretprobe__sys_open(struct pt_regs *ctx)
{
    const char **filenamep;
    int ret = ctx->ax;
    u32 pid = bpf_get_current_pid_tgid();

    filenamep = args_filename.lookup(&pid);
    if (filenamep == 0) {
        // missed entry
        return 0;
    }

    bpf_trace_printk("%d %s\\n", ret, *filenamep);
    args_filename.delete(&pid);

    return 0;
}
"""
if args.pid:
    bpf_text = bpf_text.replace('FILTER',
        'if (pid != %s) { return 0; }' % args.pid)
else:
    bpf_text = bpf_text.replace('FILTER', '')
if debug:
    print(bpf_text)

# initialize BPF
b = BPF(text=bpf_text)

# header
if args.timestamp:
    print("%-14s" % ("TIME(s)"), end="")
print("%-6s %-16s %4s %3s %s" % ("PID", "COMM", "FD", "ERR", "PATH"))

start_ts = 0

# format output
# NOTE(review): no KeyboardInterrupt handling — Ctrl-C exits with a
# traceback; presumably acceptable for this legacy tool, but verify.
while 1:
    (task, pid, cpu, flags, ts, msg) = b.trace_fields()
    (ret_s, filename) = msg.split(" ", 1)
    ret = int(ret_s)
    if (args.failed and (ret >= 0)):
        continue

    # split return value into FD and errno columns
    if ret >= 0:
        fd_s = ret
        err = 0
    else:
        fd_s = "-1"
        err = - ret

    # print columns
    if args.timestamp:
        if start_ts == 0:
            start_ts = ts
        # timestamps are reported relative to the first event seen
        print("%-14.9f" % (ts - start_ts), end="")
    print("%-6d %-16s %4s %3s %s" % (pid, task, fd_s, err, filename))
apache-2.0
valkjsaaa/sl4a
python/src/Lib/test/test_telnetlib.py
61
2162
import socket
import threading
import telnetlib
import time

from unittest import TestCase
from test import test_support

HOST = test_support.HOST

def server(evt, serv):
    """One-shot server thread body: accept a single connection, then close.

    Sets *evt* once when ready to accept, and again when finished so the
    test's tearDown can wait for the thread to wind down.
    """
    serv.listen(5)
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        # No client connected within the socket timeout; fine for tests
        # that never open a connection.
        pass
    finally:
        serv.close()
        evt.set()

class GeneralTests(TestCase):
    """Connection and timeout behaviour of telnetlib.Telnet."""

    def setUp(self):
        # Bind to an ephemeral port and start the one-shot server thread,
        # then wait for it to signal readiness before the test runs.
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(3)
        self.port = test_support.bind_port(self.sock)
        threading.Thread(target=server, args=(self.evt,self.sock)).start()
        self.evt.wait()
        self.evt.clear()
        # Brief pause so the server is actually in accept() before connect.
        time.sleep(.1)

    def tearDown(self):
        # Wait for the server thread's final evt.set().
        self.evt.wait()

    def testBasic(self):
        # connects
        telnet = telnetlib.Telnet(HOST, self.port)
        telnet.sock.close()

    def testTimeoutDefault(self):
        # With no explicit timeout, Telnet picks up the global default.
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet("localhost", self.port)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutNone(self):
        # timeout=None must override a non-None global default (blocking).
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(telnet.sock.gettimeout() is None)
        telnet.sock.close()

    def testTimeoutValue(self):
        # An explicit timeout passed to the constructor is used as-is.
        telnet = telnetlib.Telnet("localhost", self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutOpen(self):
        # Same as above, but via open() on a bare Telnet instance.
        telnet = telnetlib.Telnet()
        telnet.open("localhost", self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

def test_main(verbose=None):
    """Entry point used by Python's regression-test driver."""
    test_support.run_unittest(GeneralTests)

if __name__ == '__main__':
    test_main()
apache-2.0
tengyifei/grpc
test/core/bad_ssl/gen_build_yaml.py
1
3779
#!/usr/bin/env python2.7 # Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Generates the appropriate build.json data for all the end2end tests."""


import collections
import yaml

# Per-test tuning knobs; _replace() below overrides individual fields.
TestOptions = collections.namedtuple('TestOptions', 'flaky cpu_cost')
default_test_options = TestOptions(False, 1.0)

# maps test names to options
BAD_CLIENT_TESTS = {
    'cert': default_test_options._replace(cpu_cost=0.1),
    'alpn': default_test_options._replace(cpu_cost=0.1),
}


def main():
  """Emit the build metadata (one shared server lib, one server and one
  test target per entry in BAD_CLIENT_TESTS) as YAML on stdout.
  """
  # NOTE(review): this local is named `json` but holds a plain dict that is
  # dumped as YAML; the name shadows the stdlib module (not imported here).
  json = {
      '#': 'generated with test/bad_ssl/gen_build_json.py',
      'libs': [
          {
              'name': 'bad_ssl_test_server',
              'build': 'private',
              'language': 'core',
              'src': ['test/core/bad_ssl/server_common.c'],
              'headers': ['test/core/bad_ssl/server_common.h'],
              'vs_proj_dir': 'test',
              'platforms': ['linux', 'posix', 'mac'],
              'deps': [
                  'grpc_test_util',
                  'grpc',
                  'gpr_test_util',
                  'gpr'
              ]
          }
      ],
      'targets': [
          {
              'name': 'bad_ssl_%s_server' % t,
              'build': 'test',
              'language': 'core',
              'run': False,
              'src': ['test/core/bad_ssl/servers/%s.c' % t],
              'vs_proj_dir': 'test/bad_ssl',
              'platforms': ['linux', 'posix', 'mac'],
              'deps': [
                  'bad_ssl_test_server',
                  'grpc_test_util',
                  'grpc',
                  'gpr_test_util',
                  'gpr'
              ]
          }
          # sorted() keeps the generated output deterministic.
          for t in sorted(BAD_CLIENT_TESTS.keys())] + [
          {
              'name': 'bad_ssl_%s_test' % t,
              'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
              'build': 'test',
              'language': 'core',
              'src': ['test/core/bad_ssl/bad_ssl_test.c'],
              'vs_proj_dir': 'test',
              'platforms': ['linux', 'posix', 'mac'],
              'deps': [
                  'grpc_test_util',
                  'grpc',
                  'gpr_test_util',
                  'gpr'
              ]
          }
          for t in sorted(BAD_CLIENT_TESTS.keys())]}
  # Python 2 print statement (file targets python2.7 per its shebang).
  print yaml.dump(json)


if __name__ == '__main__':
  main()
bsd-3-clause
bertrand-l/numpy
tools/npy_tempita/compat3.py
42
1094
from __future__ import absolute_import, division, print_function import sys __all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode', 'iteritems'] PY3 = True if sys.version_info[0] == 3 else False if sys.version_info[0] < 3: def next(obj): return obj.next() def iteritems(d, **kw): return d.iteritems(**kw) b = bytes = str basestring_ = basestring else: def b(s): if isinstance(s, str): return s.encode('latin1') return bytes(s) def iteritems(d, **kw): return iter(d.items(**kw)) next = next basestring_ = (bytes, str) bytes = bytes text = str def is_unicode(obj): if sys.version_info[0] < 3: return isinstance(obj, unicode) else: return isinstance(obj, str) def coerce_text(v): if not isinstance(v, basestring_): if sys.version_info[0] < 3: attr = '__unicode__' else: attr = '__str__' if hasattr(v, attr): return unicode(v) else: return bytes(v) return v
bsd-3-clause
shreyasva/tensorflow
tensorflow/python/platform/googletest.py
11
1208
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Switch between depending on googletest or unittest.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import # pylint: disable=g-import-not-at-top # pylint: disable=wildcard-import import tensorflow.python.platform from . import control_imports if control_imports.USE_OSS and control_imports.OSS_GOOGLETEST: from tensorflow.python.platform.default._googletest import * else: from tensorflow.python.platform.google._googletest import *
apache-2.0
j2a/pytils
pytils/test/templatetags/test_dt.py
2
2146
# -*- coding: utf-8 -*- """ Unit tests for pytils' dt templatetags for Django web framework """ import datetime from pytils.test.templatetags import helpers class DtDefaultTestCase(helpers.TemplateTagTestCase): def setUp(self): self.date = datetime.datetime(2007, 1, 26, 15, 50) self.date_before = datetime.datetime.now() - datetime.timedelta(1, 2000) def testLoad(self): self.check_template_tag('load_tag', u'{% load pytils_dt %}', {}, u'') def testRuStrftimeFilter(self): self.check_template_tag('ru_strftime_filter', u'{% load pytils_dt %}{{ val|ru_strftime:"%d %B %Y, %A" }}', {'val': self.date}, u'26 января 2007, пятница') def testRuStrftimeInflectedFilter(self): self.check_template_tag('ru_strftime_inflected_filter', u'{% load pytils_dt %}{{ val|ru_strftime_inflected:"в %A, %d %B %Y" }}', {'val': self.date}, u'в пятницу, 26 января 2007') def testRuStrftimePrepositionFilter(self): self.check_template_tag('ru_strftime_preposition_filter', u'{% load pytils_dt %}{{ val|ru_strftime_preposition:"%A, %d %B %Y" }}', {'val': self.date}, u'в\xa0пятницу, 26 января 2007') def testDistanceFilter(self): self.check_template_tag('distance_filter', u'{% load pytils_dt %}{{ val|distance_of_time }}', {'val': self.date_before}, u'вчера') self.check_template_tag('distance_filter', u'{% load pytils_dt %}{{ val|distance_of_time:3 }}', {'val': self.date_before}, u'1 день 0 часов 33 минуты назад') # без отладки, если ошибка -- по умолчанию пустая строка def testRuStrftimeError(self): self.check_template_tag('ru_strftime_error', u'{% load pytils_dt %}{{ val|ru_strftime:"%d %B %Y" }}', {'val': 1}, u'') if __name__ == '__main__': import unittest unittest.main()
mit
noironetworks/heat
heat/engine/lifecycle_plugin.py
9
2138
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


class LifecyclePlugin(object):
    """Abstract hook for running code before and after stack operations.

    Concrete plugins subclass this and override the hook methods; the
    defaults are deliberate no-ops so a plugin only needs to implement
    the hooks it cares about.
    """

    def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
        """Hook invoked by heat before a stack operation.

        The default implementation does nothing.
        """

    def do_post_op(self, cnxt, stack, current_stack=None, action=None,
                   is_stack_failure=False):
        """Hook invoked by heat after a stack operation, including failures.

        If registering the full set of pre_ops fails partway through, this
        hook runs only when the matching pre_op had already run successfully.
        If the stack operation itself fails, this hook runs provided every
        pre operation completed.  The default implementation does nothing.
        """

    def get_ordinal(self):
        """Return the sort key used to order plugin hook invocations.

        Heat builds a partial order over registered plugins from these
        values: a plugin with a smaller ordinal has its hooks invoked
        before one with a larger ordinal; equal ordinals leave the order
        unspecified.  Subclasses may override the default of 100.
        """
        return 100
apache-2.0
elkingtonmcb/pattern
setup.py
21
5347
#### PATTERN ####################################################################################### import sys import os from setuptools import setup from pattern import __version__ #--------------------------------------------------------------------------------------------------- # "python setup.py zip" will create the zipped distribution and checksum. if sys.argv[-1] == "zip": import zipfile import hashlib import codecs import re n = "pattern-%s.zip" % __version__ p = os.path.join(os.path.dirname(os.path.realpath(__file__))) z = zipfile.ZipFile(os.path.join(p, "..", n), "w", zipfile.ZIP_DEFLATED) for root, folders, files in os.walk(p): for f in files: f = os.path.join(root, f) # Exclude private settings. if f.endswith(os.path.join("web", "api.py")): d = "#--- PRIVATE" s = codecs.open(f, "r", encoding="utf-8").read().split(d) x = codecs.open(f, "w", encoding="utf-8") x.write(s[0]) x.close() # Exclude revision history (.git). # Exclude development files (.dev). if not re.search(r"\.DS|\.git[^i]|\.pyc|\.dev|tmp", f): z.write(f, os.path.join("pattern-" + __version__, os.path.relpath(f, p))) if f.endswith(os.path.join("web", "api.py")): x = codecs.open(f, "w", encoding="utf-8") x.write(d.join(s)) x.close() z.close() print n print hashlib.sha256(open(z.filename).read()).hexdigest() sys.exit(0) #--------------------------------------------------------------------------------------------------- # "python setup.py install" will install /pattern in /site-packages. 
# Package metadata for the Pattern web-mining module.
# NOTE(review): version is hard-coded to "2.6" here while the zip branch
# above uses pattern.__version__ — confirm the two cannot drift apart.
setup(
            name = "Pattern",
         version = "2.6",
     description = "Web mining module for Python.",
         license = "BSD",
          author = "Tom De Smedt",
    author_email = "tom@organisms.be",
             url = "http://www.clips.ua.ac.be/pages/pattern",
        packages = [
        "pattern",
        "pattern.web",
        "pattern.web.cache",
        "pattern.web.docx",
        "pattern.web.feed",
        "pattern.web.imap",
        "pattern.web.json",
        "pattern.web.locale",
        "pattern.web.oauth",
        "pattern.web.pdf",
        "pattern.web.soup",
        "pattern.db",
        "pattern.text",
        "pattern.text.de",
        "pattern.text.en",
        "pattern.text.en.wordlist",
        "pattern.text.en.wordnet",
        "pattern.text.en.wordnet.pywordnet",
        "pattern.text.es",
        "pattern.text.fr",
        "pattern.text.it",
        "pattern.text.nl",
        "pattern.vector",
        "pattern.vector.svm",
        "pattern.graph",
        "pattern.server"
    ],
    # Non-Python data files shipped inside each package.
    package_data = {
        "pattern"                 : ["*.js"],
        "pattern.web.cache"       : ["tmp/*"],
        "pattern.web.docx"        : ["*"],
        "pattern.web.feed"        : ["*"],
        "pattern.web.json"        : ["*"],
        "pattern.web.locale"      : ["*"],
        "pattern.web.pdf"         : ["*.txt", "cmap/*"],
        "pattern.web.soup"        : ["*"],
        "pattern.text.de"         : ["*.txt", "*.xml"],
        "pattern.text.en"         : ["*.txt", "*.xml", "*.slp"],
        "pattern.text.en.wordlist": ["*.txt"],
        "pattern.text.en.wordnet" : ["*.txt", "dict/*"],
        "pattern.text.en.wordnet.pywordnet": ["*"],
        "pattern.text.es"         : ["*.txt", "*.xml"],
        "pattern.text.fr"         : ["*.txt", "*.xml"],
        "pattern.text.it"         : ["*.txt", "*.xml"],
        "pattern.text.nl"         : ["*.txt", "*.xml"],
        "pattern.vector"          : ["*.txt"],
        "pattern.vector.svm"      : ["*.txt", "libsvm-3.11/*", "libsvm-3.17/*", "liblinear-1.93/*"],
        "pattern.graph"           : ["*.js", "*.csv"],
        "pattern.server"          : ["static/*", "cherrypy/cherrypy/*.*", "cherrypy/cherrypy/*/*", "cherrypy/cherrypy/cherryd"],
    },
    # Standalone modules installed alongside the packages.
    py_modules = [
        "pattern.metrics",
        "pattern.text.search",
        "pattern.text.tree"
    ],
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: Dutch",
        "Natural Language :: English",
        "Natural Language :: French",
        "Natural Language :: German",
        "Natural Language :: Italian",
        "Natural Language :: Spanish",
        "Operating System :: OS Independent",
        "Programming Language :: JavaScript",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP :: Indexing/Search",
        "Topic :: Multimedia :: Graphics",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Visualization",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Linguistic",
        "Topic :: Text Processing :: Markup :: HTML"
    ],
    zip_safe = False
)
bsd-3-clause
CaioIcy/Dauphine
externals/gtest-1.7.0/test/gtest_filter_unittest.py
2826
21261
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. 
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
# Python 2 only: the `sets` module was removed in Python 3.
import sets
import sys

import gtest_test_utils

# Constants.

# Checks if this platform can pass empty environment variables to child
# processes.  We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ.  We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)

# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)

# Checks if we should test with an empty filter.  This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)

# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'

# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'

# The command line flag for including disabled tests.
# NOTE(review): constant name is missing an 'L' in DISABED — the string
# value it holds is what matters, but the typo is upstream.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'

# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')

# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')

# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')

# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output

# Full names of all tests in gtest_filter_unittests_.

PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]

DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]

if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
    ]
else:
  DEATH_TESTS = []

# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',

    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',

    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS

# Filled in lazily once we have inspected the binary's test list.
param_tests_present = None

# Utilities.

# A private copy of the environment, mutated by SetEnvVar below and passed
# to every child process instead of os.environ.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is not None:
    environ[env_var] = value
  elif env_var in environ:
    del environ[env_var]


def RunAndReturnOutput(args = None):
  """Runs the test program and returns its output."""

  return gtest_test_utils.Subprocess([COMMAND] + (args or []),
                                     env=environ).output


def RunAndExtractTestList(args = None):
  """Runs the test program and returns its exit code and a list of tests run."""

  p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  test_case = ''
  test = ''
  for line in p.output.split('\n'):
    # A "[----] N tests from <case>" header updates the current test case;
    # a "[ RUN ]" line names a test within that case.
    match = TEST_CASE_REGEX.match(line)
    if match is not None:
      test_case = match.group(1)
    else:
      match = TEST_REGEX.match(line)
      if match is not None:
        test = match.group(1)
        tests_run.append(test_case + '.' + test)
  return (tests_run, p.exit_code)


def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment."""
  try:
    original_env = environ.copy()
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    # Restore the module-level environ in place so other helpers keep
    # seeing the same dict object.
    environ.clear()
    environ.update(original_env)


def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""

  extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
               TOTAL_SHARDS_ENV_VAR: str(total_shards)}
  return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)

# The unit test.


class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.
  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal (mutual containment, order ignored)."""

    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""

    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    # Equal length plus equal set content together imply no duplicates
    # across slices.
    # NOTE(review): the 'sets' module is Python 2 only (removed in Python 3).
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(sets.Set(set_var), sets.Set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""

    global param_tests_present
    if not param_tests_present:
      return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.
    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """

    # Probe the binary only once; the module-level cache survives across
    # test methods.
    global param_tests_present
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""

    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""

    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""

    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""

    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""

    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""

    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""

    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""

    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""

    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""

    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""

    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""

    self.RunAndVerify('*z*', [
        'FooTest.Xyz',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""

    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BazTest.TestA',
        ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""

    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        'BazTest.TestA',
        ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])

  def testNegativeFilters(self):
    """Tests filters with a '-' section excluding tests."""

    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""

    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    # Only the flag's filter ('*One') should have been applied.
    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')

      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  # Defined conditionally at class-creation time: the method only exists
  # when the binary was built with death-test support.
  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""

      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',

          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]

      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])

if __name__ == '__main__':
  gtest_test_utils.Main()
mit
chemelnucfin/tensorflow
tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
6
13119
"""Linear Estimators."""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib import layers
from tensorflow.contrib.linear_optimizer.python.ops import sdca_ops
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops


# TODO(sibyl-vie3Poto, sibyl-Aix6ihai): Add proper testing to this wrapper once the API is
# stable.
class SDCAOptimizer(object):
  """Wrapper class for SDCA optimizer.

  The wrapper is currently meant for use as an optimizer within a tf.learn
  Estimator.

  Example usage:

  ```python
  real_feature_column = real_valued_column(...)
  sparse_feature_column = sparse_column_with_hash_bucket(...)
  sdca_optimizer = linear.SDCAOptimizer(example_id_column='example_id',
                                        num_loss_partitions=1,
                                        num_table_shards=1,
                                        symmetric_l2_regularization=2.0)
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[real_feature_column, sparse_feature_column],
      weight_column_name=...,
      optimizer=sdca_optimizer)
  classifier.fit(input_fn_train, steps=50)
  classifier.evaluate(input_fn=input_fn_eval)
  ```

  Here the expectation is that the `input_fn_*` functions passed to train and
  evaluate return a pair (dict, label_tensor) where dict has `example_id_column`
  as `key` whose value is a `Tensor` of shape [batch_size] and dtype string.
  num_loss_partitions defines the number of partitions of the global loss
  function and should be set to `(#concurrent train ops/per worker)
  x (#workers)`.
  Convergence of (global) loss is guaranteed if `num_loss_partitions` is larger
  or equal to the above product. Larger values for `num_loss_partitions` lead to
  slower convergence. The recommended value for `num_loss_partitions` in
  `tf.learn` (where currently there is one process per worker) is the number
  of workers running the train steps. It defaults to 1 (single machine).
  `num_table_shards` defines the number of shards for the internal state
  table, typically set to match the number of parameter servers for large
  data sets. You can also specify a `partitioner` object to partition the primal
  weights during training (`div` partitioning strategy will be used).
  """

  def __init__(self,
               example_id_column,
               num_loss_partitions=1,
               num_table_shards=None,
               symmetric_l1_regularization=0.0,
               symmetric_l2_regularization=1.0,
               adaptive=True,
               partitioner=None):
    """Construct a new SDCAOptimizer.

    Args:
      example_id_column: Name of the feature (in the features dict passed to
        `get_train_step`) holding a unique id per example.
      num_loss_partitions: Number of partitions of the global loss function
        (see class docstring). Defaults to 1 (single machine).
      num_table_shards: Number of shards for the internal state table, or
        `None` to let the underlying ops choose.
      symmetric_l1_regularization: Symmetric L1 regularization strength.
      symmetric_l2_regularization: Symmetric L2 regularization strength.
      adaptive: Forwarded as the `adaptive` option of `SdcaModel`.
      partitioner: Optional partitioner used to partition the primal weights
        during training (see class docstring).
    """
    self._example_id_column = example_id_column
    self._num_loss_partitions = num_loss_partitions
    self._num_table_shards = num_table_shards
    self._symmetric_l1_regularization = symmetric_l1_regularization
    self._symmetric_l2_regularization = symmetric_l2_regularization
    self._adaptive = adaptive
    self._partitioner = partitioner

  def get_name(self):
    return 'SDCAOptimizer'

  @property
  def example_id_column(self):
    return self._example_id_column

  @property
  def num_loss_partitions(self):
    return self._num_loss_partitions

  @property
  def num_table_shards(self):
    return self._num_table_shards

  @property
  def symmetric_l1_regularization(self):
    return self._symmetric_l1_regularization

  @property
  def symmetric_l2_regularization(self):
    return self._symmetric_l2_regularization

  @property
  def adaptive(self):
    return self._adaptive

  @property
  def partitioner(self):
    return self._partitioner

  def get_train_step(self, columns_to_variables, weight_column_name, loss_type,
                     features, targets, global_step):
    """Returns the training operation of an SdcaModel optimizer.

    Args:
      columns_to_variables: Dict mapping feature columns to the lists of
        `Variable`s holding their weights (lists have more than one element
        when a partitioner was used at variable-creation time).
      weight_column_name: Name of the per-example weight feature, or a falsy
        value to weight all examples equally.
      loss_type: Loss type forwarded to `SdcaModel`.
      features: Dict of input tensors, keyed by feature column.
      targets: Tensor of labels, reshaped to [-1] and cast to float32.
      global_step: Variable incremented by the returned train op.

    Returns:
      A (sdca_model, train_op) pair.
    """

    def _dense_tensor_to_sparse_feature_column(dense_tensor):
      """Returns SparseFeatureColumn for the input dense_tensor."""
      # Entries equal to 0.0 are treated as absent.
      ignore_value = 0.0
      sparse_indices = array_ops.where(
          math_ops.not_equal(dense_tensor,
                             math_ops.cast(ignore_value, dense_tensor.dtype)))
      sparse_values = array_ops.gather_nd(dense_tensor, sparse_indices)
      # TODO(sibyl-Aix6ihai, sibyl-vie3Poto): Makes this efficient, as now SDCA supports
      # very sparse features with weights and not weights.
      # sparse_indices has shape [nnz, 2]; column 0 holds example indices and
      # column 1 holds feature indices.
      return SparseFeatureColumn(
          array_ops.reshape(
              array_ops.split(value=sparse_indices, num_or_size_splits=2,
                              axis=1)[0], [-1]),
          array_ops.reshape(
              array_ops.split(value=sparse_indices, num_or_size_splits=2,
                              axis=1)[1], [-1]),
          array_ops.reshape(math_ops.cast(sparse_values, dtypes.float32), [-1]))

    def _training_examples_and_variables():
      """Returns dictionaries for training examples and variables."""
      batch_size = targets.get_shape()[0]

      # Iterate over all feature columns and create appropriate lists for dense
      # and sparse features as well as dense and sparse weights (variables) for
      # SDCA.
      # TODO(sibyl-vie3Poto): Reshape variables stored as values in column_to_variables
      # dict as 1-dimensional tensors.
      dense_features, sparse_features, sparse_feature_with_values = [], [], []
      dense_feature_weights = []
      sparse_feature_weights, sparse_feature_with_values_weights = [], []
      # Columns are visited in sorted key order so graph construction is
      # deterministic across runs.
      for column in sorted(columns_to_variables.keys(), key=lambda x: x.key):
        transformed_tensor = features[column]
        if isinstance(column, layers.feature_column._RealValuedColumn):  # pylint: disable=protected-access
          # A real-valued column corresponds to a dense feature in SDCA. A
          # transformed tensor corresponding to a RealValuedColumn should have
          # rank at most 2. In order to be passed to SDCA, its rank needs to be
          # exactly 2 (i.e., its shape should be [batch_size, column.dim]).
          check_rank_op = control_flow_ops.Assert(
              math_ops.less_equal(array_ops.rank(transformed_tensor), 2),
              ['transformed_tensor should have rank at most 2.'])
          # Reshape to [batch_size, dense_column_dimension].
          with ops.control_dependencies([check_rank_op]):
            transformed_tensor = array_ops.reshape(transformed_tensor, [
                array_ops.shape(transformed_tensor)[0], -1
            ])

          dense_features.append(transformed_tensor)
          # For real valued columns, the variables list contains exactly one
          # element.
          dense_feature_weights.append(columns_to_variables[column][0])
        elif isinstance(column, layers.feature_column._BucketizedColumn):  # pylint: disable=protected-access
          # A bucketized column corresponds to a sparse feature in SDCA. The
          # bucketized feature is "sparsified" for SDCA by converting it to a
          # SparseFeatureColumn representing the one-hot encoding of the
          # bucketized feature.
          #
          # TODO(sibyl-vie3Poto): Explore whether it is more efficient to translate a
          # bucketized feature column to a dense feature in SDCA. This will
          # likely depend on the number of buckets.
          dense_bucket_tensor = column._to_dnn_input_layer(transformed_tensor)  # pylint: disable=protected-access
          sparse_feature_column = _dense_tensor_to_sparse_feature_column(
              dense_bucket_tensor)
          sparse_feature_with_values.append(sparse_feature_column)
          # If a partitioner was used during variable creation, we will have a
          # list of Variables here larger than 1.
          vars_to_append = columns_to_variables[column][0]
          if len(columns_to_variables[column]) > 1:
            vars_to_append = columns_to_variables[column]
          sparse_feature_with_values_weights.append(vars_to_append)
        elif isinstance(
            column,
            (
                layers.feature_column._WeightedSparseColumn,  # pylint: disable=protected-access
                layers.feature_column._CrossedColumn,  # pylint: disable=protected-access
                layers.feature_column._SparseColumn)):  # pylint: disable=protected-access

          if isinstance(column, layers.feature_column._WeightedSparseColumn):  # pylint: disable=protected-access
            id_tensor = column.id_tensor(transformed_tensor)
            weight_tensor = array_ops.reshape(
                column.weight_tensor(transformed_tensor).values, [-1])
          else:
            id_tensor = transformed_tensor
            # Unweighted sparse columns get a weight of 1.0 per entry.
            weight_tensor = array_ops.ones(
                [array_ops.shape(id_tensor.indices)[0]], dtypes.float32)
          example_ids = array_ops.reshape(id_tensor.indices[:, 0], [-1])
          flat_ids = array_ops.reshape(id_tensor.values, [-1])
          # Prune invalid IDs (< 0) from the flat_ids, example_ids, and
          # weight_tensor. These can come from looking up an OOV entry in the
          # vocabulary (default value being -1).
          is_id_valid = math_ops.greater_equal(flat_ids, 0)
          flat_ids = array_ops.boolean_mask(flat_ids, is_id_valid)
          example_ids = array_ops.boolean_mask(example_ids, is_id_valid)
          weight_tensor = array_ops.boolean_mask(weight_tensor, is_id_valid)

          projection_length = math_ops.reduce_max(flat_ids) + 1
          # project ids based on example ids so that we can dedup ids that
          # occur multiple times for a single example.
          projected_ids = projection_length * example_ids + flat_ids

          # Remove any redundant ids.
          ids, idx = array_ops.unique(projected_ids)
          # Keep only one example id per duplicated ids.
          example_ids_filtered = math_ops.unsorted_segment_min(
              example_ids, idx, array_ops.shape(ids)[0])

          # reproject ids back feature id space.
          reproject_ids = (ids - projection_length * example_ids_filtered)

          # Weights of duplicated (example, id) pairs are summed together.
          weights = array_ops.reshape(
              math_ops.unsorted_segment_sum(weight_tensor, idx,
                                            array_ops.shape(ids)[0]), [-1])
          sparse_feature_with_values.append(
              SparseFeatureColumn(example_ids_filtered, reproject_ids, weights))
          # If a partitioner was used during variable creation, we will have a
          # list of Variables here larger than 1.
          vars_to_append = columns_to_variables[column][0]
          if len(columns_to_variables[column]) > 1:
            vars_to_append = columns_to_variables[column]
          sparse_feature_with_values_weights.append(vars_to_append)
        else:
          raise ValueError('SDCAOptimizer does not support column type %s.' %
                           type(column).__name__)

      example_weights = array_ops.reshape(
          features[weight_column_name],
          shape=[-1]) if weight_column_name else array_ops.ones([batch_size])
      example_ids = features[self._example_id_column]
      sparse_feature_with_values.extend(sparse_features)
      sparse_feature_with_values_weights.extend(sparse_feature_weights)
      examples = dict(
          sparse_features=sparse_feature_with_values,
          dense_features=dense_features,
          example_labels=math_ops.cast(
              array_ops.reshape(targets, shape=[-1]), dtypes.float32),
          example_weights=example_weights,
          example_ids=example_ids)
      sdca_variables = dict(
          sparse_features_weights=sparse_feature_with_values_weights,
          dense_features_weights=dense_feature_weights)
      return examples, sdca_variables

    training_examples, training_variables = _training_examples_and_variables()
    sdca_model = sdca_ops.SdcaModel(
        examples=training_examples,
        variables=training_variables,
        options=dict(
            symmetric_l1_regularization=self._symmetric_l1_regularization,
            symmetric_l2_regularization=self._symmetric_l2_regularization,
            adaptive=self._adaptive,
            num_loss_partitions=self._num_loss_partitions,
            num_table_shards=self._num_table_shards,
            loss_type=loss_type))
    train_op = sdca_model.minimize(global_step=global_step)
    return sdca_model, train_op
apache-2.0
jecp/mean
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py
1366
120842
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Xcode project file generator. This module is both an Xcode project file generator and a documentation of the Xcode project file format. Knowledge of the project file format was gained based on extensive experience with Xcode, and by making changes to projects in Xcode.app and observing the resultant changes in the associated project files. XCODE PROJECT FILES The generator targets the file format as written by Xcode 3.2 (specifically, 3.2.6), but past experience has taught that the format has not changed significantly in the past several years, and future versions of Xcode are able to read older project files. Xcode project files are "bundled": the project "file" from an end-user's perspective is actually a directory with an ".xcodeproj" extension. The project file from this module's perspective is actually a file inside this directory, always named "project.pbxproj". This file contains a complete description of the project and is all that is needed to use the xcodeproj. Other files contained in the xcodeproj directory are simply used to store per-user settings, such as the state of various UI elements in the Xcode application. The project.pbxproj file is a property list, stored in a format almost identical to the NeXTstep property list format. The file is able to carry Unicode data, and is encoded in UTF-8. The root element in the property list is a dictionary that contains several properties of minimal interest, and two properties of immense interest. The most important property is a dictionary named "objects". The entire structure of the project is represented by the children of this property. The objects dictionary is keyed by unique 96-bit values represented by 24 uppercase hexadecimal characters. Each value in the objects dictionary is itself a dictionary, describing an individual object. 
Each object in the dictionary is a member of a class, which is identified by the "isa" property of each object. A variety of classes are represented in a project file. Objects can refer to other objects by ID, using the 24-character hexadecimal object key. A project's objects form a tree, with a root object of class PBXProject at the root. As an example, the PBXProject object serves as parent to an XCConfigurationList object defining the build configurations used in the project, a PBXGroup object serving as a container for all files referenced in the project, and a list of target objects, each of which defines a target in the project. There are several different types of target object, such as PBXNativeTarget and PBXAggregateTarget. In this module, this relationship is expressed by having each target type derive from an abstract base named XCTarget. The project.pbxproj file's root dictionary also contains a property, sibling to the "objects" dictionary, named "rootObject". The value of rootObject is a 24-character object key referring to the root PBXProject object in the objects dictionary. In Xcode, every file used as input to a target or produced as a final product of a target must appear somewhere in the hierarchy rooted at the PBXGroup object referenced by the PBXProject's mainGroup property. A PBXGroup is generally represented as a folder in the Xcode application. PBXGroups can contain other PBXGroups as well as PBXFileReferences, which are pointers to actual files. Each XCTarget contains a list of build phases, represented in this module by the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the "Compile Sources" and "Link Binary With Libraries" phases displayed in the Xcode application. 
Files used as input to these phases (for example, source files in the former
case and libraries and frameworks in the latter) are represented by
PBXBuildFile objects, referenced by elements of "files" lists in XCTarget
objects.  Each PBXBuildFile object refers to a PBXFileReference object as a
"weak" reference: it does not "own" the PBXFileReference, which is owned by
the root object's mainGroup or a descendant group.  In most cases, the layer
of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each.  These flags can be modified in the Xcode
application in the "Build" tab of a File Info window.

When a project is open in the Xcode application, Xcode will rewrite it.  As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application.  This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.

Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here.  Because the identifier is only generated once,
when an object is created, and is then left unchanged, there is no need to
attempt to duplicate Xcode's behavior in this area.  The generator is free to
select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file.
However, the generator would choose new random identifiers each time the project files are generated, leading to difficulties comparing "used" project files to "pristine" ones produced by this module, and causing the appearance of changes as every object identifier is changed when updated projects are checked in to a version control repository. To mitigate this problem, this module chooses identifiers in a more deterministic way, by hashing a description of each object as well as its parent and ancestor objects. This strategy should result in minimal "shift" in IDs as successive generations of project files are produced. THIS MODULE This module introduces several classes, all derived from the XCObject class. Nearly all of the "brains" are built into the XCObject class, which understands how to create and modify objects, maintain the proper tree structure, compute identifiers, and print objects. For the most part, classes derived from XCObject need only provide a _schema class object, a dictionary that expresses what properties objects of the class may contain. Given this structure, it's possible to build a minimal project file by creating objects of the appropriate types and making the proper connections: config_list = XCConfigurationList() group = PBXGroup() project = PBXProject({'buildConfigurationList': config_list, 'mainGroup': group}) With the project object set up, it can be added to an XCProjectFile object. XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject subclass that does not actually correspond to a class type found in a project file. Rather, it is used to represent the project file's root dictionary. Printing an XCProjectFile will print the entire project file, including the full "objects" dictionary. project_file = XCProjectFile({'rootObject': project}) project_file.ComputeIDs() project_file.Print() Xcode project files are always encoded in UTF-8. This module will accept strings of either the str class or the unicode class. 
Strings of class str are assumed to already be encoded in UTF-8. Obviously, if you're just using ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset. Strings of class unicode are handled properly and encoded in UTF-8 when a project file is output. """ import gyp.common import posixpath import re import struct import sys # hashlib is supplied as of Python 2.5 as the replacement interface for sha # and other secure hashes. In 2.6, sha is deprecated. Import hashlib if # available, avoiding a deprecation warning under 2.6. Import sha otherwise, # preserving 2.4 compatibility. try: import hashlib _new_sha1 = hashlib.sha1 except ImportError: import sha _new_sha1 = sha.new # See XCObject._EncodeString. This pattern is used to determine when a string # can be printed unquoted. Strings that match this pattern may be printed # unquoted. Strings that do not match must be quoted and may be further # transformed to be properly encoded. Note that this expression matches the # characters listed with "+", for 1 or more occurrences: if a string is empty, # it must not match this pattern, because it needs to be encoded as "". _unquoted = re.compile('^[A-Za-z0-9$./_]+$') # Strings that match this pattern are quoted regardless of what _unquoted says. # Oddly, Xcode will quote any string with a run of three or more underscores. _quoted = re.compile('___') # This pattern should match any character that needs to be escaped by # XCObject._EncodeString. See that function. _escaped = re.compile('[\\\\"]|[\x00-\x1f]') # Used by SourceTreeAndPathFromPath _path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$') def SourceTreeAndPathFromPath(input_path): """Given input_path, returns a tuple with sourceTree and path values. 
  Examples:
    input_path     (source_tree, output_path)
    '$(VAR)/path'  ('VAR', 'path')
    '$(VAR)'       ('VAR', None)
    'path'         (None, 'path')
  """

  source_group_match = _path_leading_variable.match(input_path)
  if source_group_match:
    source_tree = source_group_match.group(1)
    output_path = source_group_match.group(3)  # This may be None.
  else:
    source_tree = None
    output_path = input_path

  return (source_tree, output_path)


def ConvertVariablesToShellSyntax(input_string):
  # Rewrite Xcode-style "$(VAR)" variable references as shell-style "${VAR}"
  # so the string can be used in a shell script build phase.
  return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)


class XCObject(object):
  """The abstract base of all class types used in Xcode project files.

  Class variables:
    _schema: A dictionary defining the properties of this class.  The keys to
             _schema are string property keys as used in project files.  Values
             are a list of four or five elements:
             [ is_list, property_type, is_strong, is_required, default ]
             is_list: True if the property described is a list, as opposed
                      to a single element.
             property_type: The type to use as the value of the property, or
                            if is_list is True, the type to use for each
                            element of the value's list.  property_type must
                            be an XCObject subclass, or one of the built-in
                            types str, int, or dict.
             is_strong: If property_type is an XCObject subclass, is_strong
                        is True to assert that this class "owns," or serves
                        as parent, to the property value (or, if is_list is
                        True, values).  is_strong must be False if
                        property_type is not an XCObject subclass.
             is_required: True if the property is required for the class.
                          Note that is_required being True does not preclude
                          an empty string ("", in the case of property_type
                          str) or list ([], in the case of is_list True) from
                          being set for the property.
             default: Optional.  If is_required is True, default may be set
                      to provide a default value for objects that do not
                      supply their own value.  If is_required is True and
                      default is not provided, users of the class must supply
                      their own value for the property.
Note that although the values of the array are expressed in boolean terms, subclasses provide values as integers to conserve horizontal space. _should_print_single_line: False in XCObject. Subclasses whose objects should be written to the project file in the alternate single-line format, such as PBXFileReference and PBXBuildFile, should set this to True. _encode_transforms: Used by _EncodeString to encode unprintable characters. The index into this list is the ordinal of the character to transform; each value is a string used to represent the character in the output. XCObject provides an _encode_transforms list suitable for most XCObject subclasses. _alternate_encode_transforms: Provided for subclasses that wish to use the alternate encoding rules. Xcode seems to use these rules when printing objects in single-line format. Subclasses that desire this behavior should set _encode_transforms to _alternate_encode_transforms. _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs to construct this object's ID. Most classes that need custom hashing behavior should do it by overriding Hashables, but in some cases an object's parent may wish to push a hashable value into its child, and it can do so by appending to _hashables. Attributes: id: The object's identifier, a 24-character uppercase hexadecimal string. Usually, objects being created should not set id until the entire project file structure is built. At that point, UpdateIDs() should be called on the root object to assign deterministic values for id to each object in the tree. parent: The object's parent. This is set by a parent XCObject when a child object is added to it. _properties: The object's property dictionary. An object's properties are described by its class' _schema variable. """ _schema = {} _should_print_single_line = False # See _EncodeString. 
  # Table used by _EncodeString to escape characters below ASCII space
  # (0x20).  By default each control character is emitted as a "\Uxxxx"
  # sequence; a handful get conventional short escapes below.
  _encode_transforms = []
  i = 0
  while i < ord(' '):
    _encode_transforms.append('\\U%04x' % i)
    i = i + 1
  _encode_transforms[7] = '\\a'
  _encode_transforms[8] = '\\b'
  _encode_transforms[9] = '\\t'
  _encode_transforms[10] = '\\n'
  _encode_transforms[11] = '\\v'
  _encode_transforms[12] = '\\f'
  # CR is deliberately encoded as "\n"; see _EncodeString's documentation.
  _encode_transforms[13] = '\\n'

  # Alternate table used for objects printed in single-line format, where
  # some whitespace characters pass through unescaped.
  # NOTE(review): _EncodeString's documentation says HT (9), NL (10), and
  # CR (13) pass through for single-line objects, but the overrides here are
  # for indices 9, 10, and 11 (VT) -- confirm whether 11 or 13 is intended.
  _alternate_encode_transforms = list(_encode_transforms)
  _alternate_encode_transforms[9] = chr(9)
  _alternate_encode_transforms[10] = chr(10)
  _alternate_encode_transforms[11] = chr(11)

  def __init__(self, properties=None, id=None, parent=None):
    # id is usually left None here and assigned later by ComputeIDs; parent
    # is normally set by the owning XCObject when this object is added to it.
    self.id = id
    self.parent = parent
    self._properties = {}
    self._hashables = []
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)

  def __repr__(self):
    # Not every subclass is nameable; fall back to a name-less form.
    try:
      name = self.Name()
    except NotImplementedError:
      return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Copy(self):
    """Make a copy of this object.

    The new object will have its own copy of lists and dicts.  Any XCObject
    objects owned by this object (marked "strong") will be copied in the new
    object, even those found in lists.  If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """

    that = self.__class__(id=self.id, parent=self.parent)
    for key, value in self._properties.iteritems():
      is_strong = self._schema[key][2]
      if isinstance(value, XCObject):
        if is_strong:
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
that._properties[key] = [] for item in value: new_item = item.Copy() new_item.parent = that that._properties[key].append(new_item) else: that._properties[key] = value[:] elif isinstance(value, dict): # dicts are never strong. if is_strong: raise TypeError('Strong dict for key ' + key + ' in ' + \ self.__class__.__name__) else: that._properties[key] = value.copy() else: raise TypeError('Unexpected type ' + value.__class__.__name__ + \ ' for key ' + key + ' in ' + self.__class__.__name__) return that def Name(self): """Return the name corresponding to an object. Not all objects necessarily need to be nameable, and not all that do have a "name" property. Override as needed. """ # If the schema indicates that "name" is required, try to access the # property even if it doesn't exist. This will result in a KeyError # being raised for the property that should be present, which seems more # appropriate than NotImplementedError in this case. if 'name' in self._properties or \ ('name' in self._schema and self._schema['name'][3]): return self._properties['name'] raise NotImplementedError(self.__class__.__name__ + ' must implement Name') def Comment(self): """Return a comment string for the object. Most objects just use their name as the comment, but PBXProject uses different values. The returned comment is not escaped and does not have any comment marker strings applied to it. """ return self.Name() def Hashables(self): hashables = [self.__class__.__name__] name = self.Name() if name != None: hashables.append(name) hashables.extend(self._hashables) return hashables def HashablesForChild(self): return None def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None): """Set "id" properties deterministically. An object's "id" property is set based on a hash of its class type and name, as well as the class type and name of all ancestor objects. As such, it is only advisable to call ComputeIDs once an entire project file tree is built. 
If recursive is True, recurse into all descendant objects and update their hashes. If overwrite is True, any existing value set in the "id" property will be replaced. """ def _HashUpdate(hash, data): """Update hash with data's length and contents. If the hash were updated only with the value of data, it would be possible for clowns to induce collisions by manipulating the names of their objects. By adding the length, it's exceedingly less likely that ID collisions will be encountered, intentionally or not. """ hash.update(struct.pack('>i', len(data))) hash.update(data) if seed_hash is None: seed_hash = _new_sha1() hash = seed_hash.copy() hashables = self.Hashables() assert len(hashables) > 0 for hashable in hashables: _HashUpdate(hash, hashable) if recursive: hashables_for_child = self.HashablesForChild() if hashables_for_child is None: child_hash = hash else: assert len(hashables_for_child) > 0 child_hash = seed_hash.copy() for hashable in hashables_for_child: _HashUpdate(child_hash, hashable) for child in self.Children(): child.ComputeIDs(recursive, overwrite, child_hash) if overwrite or self.id is None: # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is # is 160 bits. Instead of throwing out 64 bits of the digest, xor them # into the portion that gets used. assert hash.digest_size % 4 == 0 digest_int_count = hash.digest_size / 4 digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest()) id_ints = [0, 0, 0] for index in xrange(0, digest_int_count): id_ints[index % 3] ^= digest_ints[index] self.id = '%08X%08X%08X' % tuple(id_ints) def EnsureNoIDCollisions(self): """Verifies that no two objects have the same ID. Checks all descendants. 
""" ids = {} descendants = self.Descendants() for descendant in descendants: if descendant.id in ids: other = ids[descendant.id] raise KeyError( 'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \ (descendant.id, str(descendant._properties), str(other._properties), self._properties['rootObject'].Name())) ids[descendant.id] = descendant def Children(self): """Returns a list of all of this object's owned (strong) children.""" children = [] for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong) = attributes[0:3] if is_strong and property in self._properties: if not is_list: children.append(self._properties[property]) else: children.extend(self._properties[property]) return children def Descendants(self): """Returns a list of all of this object's descendants, including this object. """ children = self.Children() descendants = [self] for child in children: descendants.extend(child.Descendants()) return descendants def PBXProjectAncestor(self): # The base case for recursion is defined at PBXProject.PBXProjectAncestor. if self.parent: return self.parent.PBXProjectAncestor() return None def _EncodeComment(self, comment): """Encodes a comment to be placed in the project file output, mimicing Xcode behavior. """ # This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If # the string already contains a "*/", it is turned into "(*)/". This keeps # the file writer from outputting something that would be treated as the # end of a comment in the middle of something intended to be entirely a # comment. return '/* ' + comment.replace('*/', '(*)/') + ' */' def _EncodeTransform(self, match): # This function works closely with _EncodeString. It will only be called # by re.sub with match.group(0) containing a character matched by the # the _escaped expression. char = match.group(0) # Backslashes (\) and quotation marks (") are always replaced with a # backslash-escaped version of the same. 
Everything else gets its # replacement from the class' _encode_transforms array. if char == '\\': return '\\\\' if char == '"': return '\\"' return self._encode_transforms[ord(char)] def _EncodeString(self, value): """Encodes a string to be placed in the project file output, mimicing Xcode behavior. """ # Use quotation marks when any character outside of the range A-Z, a-z, 0-9, # $ (dollar sign), . (period), and _ (underscore) is present. Also use # quotation marks to represent empty strings. # # Escape " (double-quote) and \ (backslash) by preceding them with a # backslash. # # Some characters below the printable ASCII range are encoded specially: # 7 ^G BEL is encoded as "\a" # 8 ^H BS is encoded as "\b" # 11 ^K VT is encoded as "\v" # 12 ^L NP is encoded as "\f" # 127 ^? DEL is passed through as-is without escaping # - In PBXFileReference and PBXBuildFile objects: # 9 ^I HT is passed through as-is without escaping # 10 ^J NL is passed through as-is without escaping # 13 ^M CR is passed through as-is without escaping # - In other objects: # 9 ^I HT is encoded as "\t" # 10 ^J NL is encoded as "\n" # 13 ^M CR is encoded as "\n" rendering it indistinguishable from # 10 ^J NL # All other characters within the ASCII control character range (0 through # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point # in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e". # Characters above the ASCII range are passed through to the output encoded # as UTF-8 without any escaping. These mappings are contained in the # class' _encode_transforms list. if _unquoted.search(value) and not _quoted.search(value): return value return '"' + _escaped.sub(self._EncodeTransform, value) + '"' def _XCPrint(self, file, tabs, line): file.write('\t' * tabs + line) def _XCPrintableValue(self, tabs, value, flatten_list=False): """Returns a representation of value that may be printed in a project file, mimicing Xcode's behavior. 
_XCPrintableValue can handle str and int values, XCObjects (which are made printable by returning their id property), and list and dict objects composed of any of the above types. When printing a list or dict, and _should_print_single_line is False, the tabs parameter is used to determine how much to indent the lines corresponding to the items in the list or dict. If flatten_list is True, single-element lists will be transformed into strings. """ printable = '' comment = None if self._should_print_single_line: sep = ' ' element_tabs = '' end_tabs = '' else: sep = '\n' element_tabs = '\t' * (tabs + 1) end_tabs = '\t' * tabs if isinstance(value, XCObject): printable += value.id comment = value.Comment() elif isinstance(value, str): printable += self._EncodeString(value) elif isinstance(value, unicode): printable += self._EncodeString(value.encode('utf-8')) elif isinstance(value, int): printable += str(value) elif isinstance(value, list): if flatten_list and len(value) <= 1: if len(value) == 0: printable += self._EncodeString('') else: printable += self._EncodeString(value[0]) else: printable = '(' + sep for item in value: printable += element_tabs + \ self._XCPrintableValue(tabs + 1, item, flatten_list) + \ ',' + sep printable += end_tabs + ')' elif isinstance(value, dict): printable = '{' + sep for item_key, item_value in sorted(value.iteritems()): printable += element_tabs + \ self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \ self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \ sep printable += end_tabs + '}' else: raise TypeError("Can't make " + value.__class__.__name__ + ' printable') if comment != None: printable += ' ' + self._EncodeComment(comment) return printable def _XCKVPrint(self, file, tabs, key, value): """Prints a key and value, members of an XCObject's _properties dictionary, to file. tabs is an int identifying the indentation level. 
If the class' _should_print_single_line variable is True, tabs is ignored and the key-value pair will be followed by a space insead of a newline. """ if self._should_print_single_line: printable = '' after_kv = ' ' else: printable = '\t' * tabs after_kv = '\n' # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy # objects without comments. Sometimes it prints them with comments, but # the majority of the time, it doesn't. To avoid unnecessary changes to # the project file after Xcode opens it, don't write comments for # remoteGlobalIDString. This is a sucky hack and it would certainly be # cleaner to extend the schema to indicate whether or not a comment should # be printed, but since this is the only case where the problem occurs and # Xcode itself can't seem to make up its mind, the hack will suffice. # # Also see PBXContainerItemProxy._schema['remoteGlobalIDString']. if key == 'remoteGlobalIDString' and isinstance(self, PBXContainerItemProxy): value_to_print = value.id else: value_to_print = value # PBXBuildFile's settings property is represented in the output as a dict, # but a hack here has it represented as a string. Arrange to strip off the # quotes so that it shows up in the output as expected. if key == 'settings' and isinstance(self, PBXBuildFile): strip_value_quotes = True else: strip_value_quotes = False # In another one-off, let's set flatten_list on buildSettings properties # of XCBuildConfiguration objects, because that's how Xcode treats them. 
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration): flatten_list = True else: flatten_list = False try: printable_key = self._XCPrintableValue(tabs, key, flatten_list) printable_value = self._XCPrintableValue(tabs, value_to_print, flatten_list) if strip_value_quotes and len(printable_value) > 1 and \ printable_value[0] == '"' and printable_value[-1] == '"': printable_value = printable_value[1:-1] printable += printable_key + ' = ' + printable_value + ';' + after_kv except TypeError, e: gyp.common.ExceptionAppend(e, 'while printing key "%s"' % key) raise self._XCPrint(file, 0, printable) def Print(self, file=sys.stdout): """Prints a reprentation of this object to file, adhering to Xcode output formatting. """ self.VerifyHasRequiredProperties() if self._should_print_single_line: # When printing an object in a single line, Xcode doesn't put any space # between the beginning of a dictionary (or presumably a list) and the # first contained item, so you wind up with snippets like # ...CDEF = {isa = PBXFileReference; fileRef = 0123... # If it were me, I would have put a space in there after the opening # curly, but I guess this is just another one of those inconsistencies # between how Xcode prints PBXFileReference and PBXBuildFile objects as # compared to other objects. Mimic Xcode's behavior here by using an # empty string for sep. sep = '' end_tabs = 0 else: sep = '\n' end_tabs = 2 # Start the object. For example, '\t\tPBXProject = {\n'. self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep) # "isa" isn't in the _properties dictionary, it's an intrinsic property # of the class which the object belongs to. Xcode always outputs "isa" # as the first element of an object dictionary. self._XCKVPrint(file, 3, 'isa', self.__class__.__name__) # The remaining elements of an object dictionary are sorted alphabetically. for property, value in sorted(self._properties.iteritems()): self._XCKVPrint(file, 3, property, value) # End the object. 
self._XCPrint(file, end_tabs, '};\n') def UpdateProperties(self, properties, do_copy=False): """Merge the supplied properties into the _properties dictionary. The input properties must adhere to the class schema or a KeyError or TypeError exception will be raised. If adding an object of an XCObject subclass and the schema indicates a strong relationship, the object's parent will be set to this object. If do_copy is True, then lists, dicts, strong-owned XCObjects, and strong-owned XCObjects in lists will be copied instead of having their references added. """ if properties is None: return for property, value in properties.iteritems(): # Make sure the property is in the schema. if not property in self._schema: raise KeyError(property + ' not in ' + self.__class__.__name__) # Make sure the property conforms to the schema. (is_list, property_type, is_strong) = self._schema[property][0:3] if is_list: if value.__class__ != list: raise TypeError( property + ' of ' + self.__class__.__name__ + \ ' must be list, not ' + value.__class__.__name__) for item in value: if not isinstance(item, property_type) and \ not (item.__class__ == unicode and property_type == str): # Accept unicode where str is specified. str is treated as # UTF-8-encoded. raise TypeError( 'item of ' + property + ' of ' + self.__class__.__name__ + \ ' must be ' + property_type.__name__ + ', not ' + \ item.__class__.__name__) elif not isinstance(value, property_type) and \ not (value.__class__ == unicode and property_type == str): # Accept unicode where str is specified. str is treated as # UTF-8-encoded. raise TypeError( property + ' of ' + self.__class__.__name__ + ' must be ' + \ property_type.__name__ + ', not ' + value.__class__.__name__) # Checks passed, perform the assignment. 
if do_copy: if isinstance(value, XCObject): if is_strong: self._properties[property] = value.Copy() else: self._properties[property] = value elif isinstance(value, str) or isinstance(value, unicode) or \ isinstance(value, int): self._properties[property] = value elif isinstance(value, list): if is_strong: # If is_strong is True, each element is an XCObject, so it's safe # to call Copy. self._properties[property] = [] for item in value: self._properties[property].append(item.Copy()) else: self._properties[property] = value[:] elif isinstance(value, dict): self._properties[property] = value.copy() else: raise TypeError("Don't know how to copy a " + \ value.__class__.__name__ + ' object for ' + \ property + ' in ' + self.__class__.__name__) else: self._properties[property] = value # Set up the child's back-reference to this object. Don't use |value| # any more because it may not be right if do_copy is true. if is_strong: if not is_list: self._properties[property].parent = self else: for item in self._properties[property]: item.parent = self def HasProperty(self, key): return key in self._properties def GetProperty(self, key): return self._properties[key] def SetProperty(self, key, value): self.UpdateProperties({key: value}) def DelProperty(self, key): if key in self._properties: del self._properties[key] def AppendProperty(self, key, value): # TODO(mark): Support ExtendProperty too (and make this call that)? # Schema validation. if not key in self._schema: raise KeyError(key + ' not in ' + self.__class__.__name__) (is_list, property_type, is_strong) = self._schema[key][0:3] if not is_list: raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list') if not isinstance(value, property_type): raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \ ' must be ' + property_type.__name__ + ', not ' + \ value.__class__.__name__) # If the property doesn't exist yet, create a new empty list to receive the # item. 
if not key in self._properties: self._properties[key] = [] # Set up the ownership link. if is_strong: value.parent = self # Store the item. self._properties[key].append(value) def VerifyHasRequiredProperties(self): """Ensure that all properties identified as required by the schema are set. """ # TODO(mark): A stronger verification mechanism is needed. Some # subclasses need to perform validation beyond what the schema can enforce. for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong, is_required) = attributes[0:4] if is_required and not property in self._properties: raise KeyError(self.__class__.__name__ + ' requires ' + property) def _SetDefaultsFromSchema(self): """Assign object default values according to the schema. This will not overwrite properties that have already been set.""" defaults = {} for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong, is_required) = attributes[0:4] if is_required and len(attributes) >= 5 and \ not property in self._properties: default = attributes[4] defaults[property] = default if len(defaults) > 0: # Use do_copy=True so that each new object gets its own copy of strong # objects, lists, and dicts. self.UpdateProperties(defaults, do_copy=True) class XCHierarchicalElement(XCObject): """Abstract base for PBXGroup and PBXFileReference. Not represented in a project file.""" # TODO(mark): Do name and path belong here? Probably so. # If path is set and name is not, name may have a default value. Name will # be set to the basename of path, if the basename of path is different from # the full value of path. If path is already just a leaf name, name will # not be set. 
_schema = XCObject._schema.copy() _schema.update({ 'comments': [0, str, 0, 0], 'fileEncoding': [0, str, 0, 0], 'includeInIndex': [0, int, 0, 0], 'indentWidth': [0, int, 0, 0], 'lineEnding': [0, int, 0, 0], 'sourceTree': [0, str, 0, 1, '<group>'], 'tabWidth': [0, int, 0, 0], 'usesTabs': [0, int, 0, 0], 'wrapsLines': [0, int, 0, 0], }) def __init__(self, properties=None, id=None, parent=None): # super XCObject.__init__(self, properties, id, parent) if 'path' in self._properties and not 'name' in self._properties: path = self._properties['path'] name = posixpath.basename(path) if name != '' and path != name: self.SetProperty('name', name) if 'path' in self._properties and \ (not 'sourceTree' in self._properties or \ self._properties['sourceTree'] == '<group>'): # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take # the variable out and make the path be relative to that variable by # assigning the variable name as the sourceTree. (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path']) if source_tree != None: self._properties['sourceTree'] = source_tree if path != None: self._properties['path'] = path if source_tree != None and path is None and \ not 'name' in self._properties: # The path was of the form "$(SDKROOT)" with no path following it. # This object is now relative to that variable, so it has no path # attribute of its own. It does, however, keep a name. del self._properties['path'] self._properties['name'] = source_tree def Name(self): if 'name' in self._properties: return self._properties['name'] elif 'path' in self._properties: return self._properties['path'] else: # This happens in the case of the root PBXGroup. return None def Hashables(self): """Custom hashables for XCHierarchicalElements. XCHierarchicalElements are special. Generally, their hashes shouldn't change if the paths don't change. 
The normal XCObject implementation of Hashables adds a hashable for each object, which means that if the hierarchical structure changes (possibly due to changes caused when TakeOverOnlyChild runs and encounters slight changes in the hierarchy), the hashes will change. For example, if a project file initially contains a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent a/b. If someone later adds a/f2 to the project file, a/b can no longer be collapsed, and f1 winds up with parent b and grandparent a. That would be sufficient to change f1's hash. To counteract this problem, hashables for all XCHierarchicalElements except for the main group (which has neither a name nor a path) are taken to be just the set of path components. Because hashables are inherited from parents, this provides assurance that a/b/f1 has the same set of hashables whether its parent is b or a/b. The main group is a special case. As it is permitted to have no name or path, it is permitted to use the standard XCObject hash mechanism. This is not considered a problem because there can be only one main group. """ if self == self.PBXProjectAncestor()._properties['mainGroup']: # super return XCObject.Hashables(self) hashables = [] # Put the name in first, ensuring that if TakeOverOnlyChild collapses # children into a top-level group like "Source", the name always goes # into the list of hashables without interfering with path components. if 'name' in self._properties: # Make it less likely for people to manipulate hashes by following the # pattern of always pushing an object type value onto the list first. hashables.append(self.__class__.__name__ + '.name') hashables.append(self._properties['name']) # NOTE: This still has the problem that if an absolute path is encountered, # including paths with a sourceTree, they'll still inherit their parents' # hashables, even though the paths aren't relative to their parents. This # is not expected to be much of a problem in practice. 
path = self.PathFromSourceTreeAndPath() if path != None: components = path.split(posixpath.sep) for component in components: hashables.append(self.__class__.__name__ + '.path') hashables.append(component) hashables.extend(self._hashables) return hashables def Compare(self, other): # Allow comparison of these types. PBXGroup has the highest sort rank; # PBXVariantGroup is treated as equal to PBXFileReference. valid_class_types = { PBXFileReference: 'file', PBXGroup: 'group', PBXVariantGroup: 'file', } self_type = valid_class_types[self.__class__] other_type = valid_class_types[other.__class__] if self_type == other_type: # If the two objects are of the same sort rank, compare their names. return cmp(self.Name(), other.Name()) # Otherwise, sort groups before everything else. if self_type == 'group': return -1 return 1 def CompareRootGroup(self, other): # This function should be used only to compare direct children of the # containing PBXProject's mainGroup. These groups should appear in the # listed order. # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the # generator should have a way of influencing this list rather than having # to hardcode for the generator here. order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products', 'Build'] # If the groups aren't in the listed order, do a name comparison. # Otherwise, groups in the listed order should come before those that # aren't. self_name = self.Name() other_name = other.Name() self_in = isinstance(self, PBXGroup) and self_name in order other_in = isinstance(self, PBXGroup) and other_name in order if not self_in and not other_in: return self.Compare(other) if self_name in order and not other_name in order: return -1 if other_name in order and not self_name in order: return 1 # If both groups are in the listed order, go by the defined order. 
self_index = order.index(self_name) other_index = order.index(other_name) if self_index < other_index: return -1 if self_index > other_index: return 1 return 0 def PathFromSourceTreeAndPath(self): # Turn the object's sourceTree and path properties into a single flat # string of a form comparable to the path parameter. If there's a # sourceTree property other than "<group>", wrap it in $(...) for the # comparison. components = [] if self._properties['sourceTree'] != '<group>': components.append('$(' + self._properties['sourceTree'] + ')') if 'path' in self._properties: components.append(self._properties['path']) if len(components) > 0: return posixpath.join(*components) return None def FullPath(self): # Returns a full path to self relative to the project file, or relative # to some other source tree. Start with self, and walk up the chain of # parents prepending their paths, if any, until no more parents are # available (project-relative path) or until a path relative to some # source tree is found. xche = self path = None while isinstance(xche, XCHierarchicalElement) and \ (path is None or \ (not path.startswith('/') and not path.startswith('$'))): this_path = xche.PathFromSourceTreeAndPath() if this_path != None and path != None: path = posixpath.join(this_path, path) elif this_path != None: path = this_path xche = xche.parent return path class PBXGroup(XCHierarchicalElement): """ Attributes: _children_by_path: Maps pathnames of children of this PBXGroup to the actual child XCHierarchicalElement objects. _variant_children_by_name_and_path: Maps (name, path) tuples of PBXVariantGroup children to the actual child PBXVariantGroup objects. 
""" _schema = XCHierarchicalElement._schema.copy() _schema.update({ 'children': [1, XCHierarchicalElement, 1, 1, []], 'name': [0, str, 0, 0], 'path': [0, str, 0, 0], }) def __init__(self, properties=None, id=None, parent=None): # super XCHierarchicalElement.__init__(self, properties, id, parent) self._children_by_path = {} self._variant_children_by_name_and_path = {} for child in self._properties.get('children', []): self._AddChildToDicts(child) def Hashables(self): # super hashables = XCHierarchicalElement.Hashables(self) # It is not sufficient to just rely on name and parent to build a unique # hashable : a node could have two child PBXGroup sharing a common name. # To add entropy the hashable is enhanced with the names of all its # children. for child in self._properties.get('children', []): child_name = child.Name() if child_name != None: hashables.append(child_name) return hashables def HashablesForChild(self): # To avoid a circular reference the hashables used to compute a child id do # not include the child names. return XCHierarchicalElement.Hashables(self) def _AddChildToDicts(self, child): # Sets up this PBXGroup object's dicts to reference the child properly. child_path = child.PathFromSourceTreeAndPath() if child_path: if child_path in self._children_by_path: raise ValueError('Found multiple children with path ' + child_path) self._children_by_path[child_path] = child if isinstance(child, PBXVariantGroup): child_name = child._properties.get('name', None) key = (child_name, child_path) if key in self._variant_children_by_name_and_path: raise ValueError('Found multiple PBXVariantGroup children with ' + \ 'name ' + str(child_name) + ' and path ' + \ str(child_path)) self._variant_children_by_name_and_path[key] = child def AppendChild(self, child): # Callers should use this instead of calling # AppendProperty('children', child) directly because this function # maintains the group's dicts. 
self.AppendProperty('children', child) self._AddChildToDicts(child) def GetChildByName(self, name): # This is not currently optimized with a dict as GetChildByPath is because # it has few callers. Most callers probably want GetChildByPath. This # function is only useful to get children that have names but no paths, # which is rare. The children of the main group ("Source", "Products", # etc.) is pretty much the only case where this likely to come up. # # TODO(mark): Maybe this should raise an error if more than one child is # present with the same name. if not 'children' in self._properties: return None for child in self._properties['children']: if child.Name() == name: return child return None def GetChildByPath(self, path): if not path: return None if path in self._children_by_path: return self._children_by_path[path] return None def GetChildByRemoteObject(self, remote_object): # This method is a little bit esoteric. Given a remote_object, which # should be a PBXFileReference in another project file, this method will # return this group's PBXReferenceProxy object serving as a local proxy # for the remote PBXFileReference. # # This function might benefit from a dict optimization as GetChildByPath # for some workloads, but profiling shows that it's not currently a # problem. if not 'children' in self._properties: return None for child in self._properties['children']: if not isinstance(child, PBXReferenceProxy): continue container_proxy = child._properties['remoteRef'] if container_proxy._properties['remoteGlobalIDString'] == remote_object: return child return None def AddOrGetFileByPath(self, path, hierarchical): """Returns an existing or new file reference corresponding to path. If hierarchical is True, this method will create or use the necessary hierarchical group structure corresponding to path. Otherwise, it will look in and create an item in the current group only. 
If an existing matching reference is found, it is returned, otherwise, a new one will be created, added to the correct group, and returned. If path identifies a directory by virtue of carrying a trailing slash, this method returns a PBXFileReference of "folder" type. If path identifies a variant, by virtue of it identifying a file inside a directory with an ".lproj" extension, this method returns a PBXVariantGroup containing the variant named by path, and possibly other variants. For all other paths, a "normal" PBXFileReference will be returned. """ # Adding or getting a directory? Directories end with a trailing slash. is_dir = False if path.endswith('/'): is_dir = True path = posixpath.normpath(path) if is_dir: path = path + '/' # Adding or getting a variant? Variants are files inside directories # with an ".lproj" extension. Xcode uses variants for localization. For # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named # MainMenu.nib inside path/to, and give it a variant named Language. In # this example, grandparent would be set to path/to and parent_root would # be set to Language. variant_name = None parent = posixpath.dirname(path) grandparent = posixpath.dirname(parent) parent_basename = posixpath.basename(parent) (parent_root, parent_ext) = posixpath.splitext(parent_basename) if parent_ext == '.lproj': variant_name = parent_root if grandparent == '': grandparent = None # Putting a directory inside a variant group is not currently supported. assert not is_dir or variant_name is None path_split = path.split(posixpath.sep) if len(path_split) == 1 or \ ((is_dir or variant_name != None) and len(path_split) == 2) or \ not hierarchical: # The PBXFileReference or PBXVariantGroup will be added to or gotten from # this PBXGroup, no recursion necessary. if variant_name is None: # Add or get a PBXFileReference. 
file_ref = self.GetChildByPath(path) if file_ref != None: assert file_ref.__class__ == PBXFileReference else: file_ref = PBXFileReference({'path': path}) self.AppendChild(file_ref) else: # Add or get a PBXVariantGroup. The variant group name is the same # as the basename (MainMenu.nib in the example above). grandparent # specifies the path to the variant group itself, and path_split[-2:] # is the path of the specific variant relative to its group. variant_group_name = posixpath.basename(path) variant_group_ref = self.AddOrGetVariantGroupByNameAndPath( variant_group_name, grandparent) variant_path = posixpath.sep.join(path_split[-2:]) variant_ref = variant_group_ref.GetChildByPath(variant_path) if variant_ref != None: assert variant_ref.__class__ == PBXFileReference else: variant_ref = PBXFileReference({'name': variant_name, 'path': variant_path}) variant_group_ref.AppendChild(variant_ref) # The caller is interested in the variant group, not the specific # variant file. file_ref = variant_group_ref return file_ref else: # Hierarchical recursion. Add or get a PBXGroup corresponding to the # outermost path component, and then recurse into it, chopping off that # path component. next_dir = path_split[0] group_ref = self.GetChildByPath(next_dir) if group_ref != None: assert group_ref.__class__ == PBXGroup else: group_ref = PBXGroup({'path': next_dir}) self.AppendChild(group_ref) return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]), hierarchical) def AddOrGetVariantGroupByNameAndPath(self, name, path): """Returns an existing or new PBXVariantGroup for name and path. If a PBXVariantGroup identified by the name and path arguments is already present as a child of this object, it is returned. Otherwise, a new PBXVariantGroup with the correct properties is created, added as a child, and returned. This method will generally be called by AddOrGetFileByPath, which knows when to create a variant group based on the structure of the pathnames passed to it. 
""" key = (name, path) if key in self._variant_children_by_name_and_path: variant_group_ref = self._variant_children_by_name_and_path[key] assert variant_group_ref.__class__ == PBXVariantGroup return variant_group_ref variant_group_properties = {'name': name} if path != None: variant_group_properties['path'] = path variant_group_ref = PBXVariantGroup(variant_group_properties) self.AppendChild(variant_group_ref) return variant_group_ref def TakeOverOnlyChild(self, recurse=False): """If this PBXGroup has only one child and it's also a PBXGroup, take it over by making all of its children this object's children. This function will continue to take over only children when those children are groups. If there are three PBXGroups representing a, b, and c, with c inside b and b inside a, and a and b have no other children, this will result in a taking over both b and c, forming a PBXGroup for a/b/c. If recurse is True, this function will recurse into children and ask them to collapse themselves by taking over only children as well. Assuming an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f (d1, d2, and f are files, the rest are groups), recursion will result in a group for a/b/c containing a group for d3/e. """ # At this stage, check that child class types are PBXGroup exactly, # instead of using isinstance. The only subclass of PBXGroup, # PBXVariantGroup, should not participate in reparenting in the same way: # reparenting by merging different object types would be wrong. while len(self._properties['children']) == 1 and \ self._properties['children'][0].__class__ == PBXGroup: # Loop to take over the innermost only-child group possible. child = self._properties['children'][0] # Assume the child's properties, including its children. Save a copy # of this object's old properties, because they'll still be needed. # This object retains its existing id and parent attributes. 
old_properties = self._properties self._properties = child._properties self._children_by_path = child._children_by_path if not 'sourceTree' in self._properties or \ self._properties['sourceTree'] == '<group>': # The child was relative to its parent. Fix up the path. Note that # children with a sourceTree other than "<group>" are not relative to # their parents, so no path fix-up is needed in that case. if 'path' in old_properties: if 'path' in self._properties: # Both the original parent and child have paths set. self._properties['path'] = posixpath.join(old_properties['path'], self._properties['path']) else: # Only the original parent has a path, use it. self._properties['path'] = old_properties['path'] if 'sourceTree' in old_properties: # The original parent had a sourceTree set, use it. self._properties['sourceTree'] = old_properties['sourceTree'] # If the original parent had a name set, keep using it. If the original # parent didn't have a name but the child did, let the child's name # live on. If the name attribute seems unnecessary now, get rid of it. if 'name' in old_properties and old_properties['name'] != None and \ old_properties['name'] != self.Name(): self._properties['name'] = old_properties['name'] if 'name' in self._properties and 'path' in self._properties and \ self._properties['name'] == self._properties['path']: del self._properties['name'] # Notify all children of their new parent. for child in self._properties['children']: child.parent = self # If asked to recurse, recurse. if recurse: for child in self._properties['children']: if child.__class__ == PBXGroup: child.TakeOverOnlyChild(recurse) def SortGroup(self): self._properties['children'] = \ sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y)) # Recurse. 
for child in self._properties['children']: if isinstance(child, PBXGroup): child.SortGroup() class XCFileLikeElement(XCHierarchicalElement): # Abstract base for objects that can be used as the fileRef property of # PBXBuildFile. def PathHashables(self): # A PBXBuildFile that refers to this object will call this method to # obtain additional hashables specific to this XCFileLikeElement. Don't # just use this object's hashables, they're not specific and unique enough # on their own (without access to the parent hashables.) Instead, provide # hashables that identify this object by path by getting its hashables as # well as the hashables of ancestor XCHierarchicalElement objects. hashables = [] xche = self while xche != None and isinstance(xche, XCHierarchicalElement): xche_hashables = xche.Hashables() for index in xrange(0, len(xche_hashables)): hashables.insert(index, xche_hashables[index]) xche = xche.parent return hashables class XCContainerPortal(XCObject): # Abstract base for objects that can be used as the containerPortal property # of PBXContainerItemProxy. pass class XCRemoteObject(XCObject): # Abstract base for objects that can be used as the remoteGlobalIDString # property of PBXContainerItemProxy. pass class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject): _schema = XCFileLikeElement._schema.copy() _schema.update({ 'explicitFileType': [0, str, 0, 0], 'lastKnownFileType': [0, str, 0, 0], 'name': [0, str, 0, 0], 'path': [0, str, 0, 1], }) # Weird output rules for PBXFileReference. 
  _should_print_single_line = True

  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes a PBXFileReference.

    Strips a trailing slash from 'path' (remembering that the reference
    identified a directory), and if neither 'lastKnownFileType' nor
    'explicitFileType' was supplied, infers a file type from the path's
    extension using the tables below.
    """
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      # TODO(mark): This is the replacement for a replacement for a quick hack.
      # It is no longer incredibly sucky, but this list needs to be extended.
      # Maps lowercased filename extensions to Xcode file-type identifiers.
      extension_map = {
        'a':           'archive.ar',
        'app':         'wrapper.application',
        'bdic':        'file',
        'bundle':      'wrapper.cfbundle',
        'c':           'sourcecode.c.c',
        'cc':          'sourcecode.cpp.cpp',
        'cpp':         'sourcecode.cpp.cpp',
        'css':         'text.css',
        'cxx':         'sourcecode.cpp.cpp',
        'dart':        'sourcecode',
        'dylib':       'compiled.mach-o.dylib',
        'framework':   'wrapper.framework',
        'gyp':         'sourcecode',
        'gypi':        'sourcecode',
        'h':           'sourcecode.c.h',
        'hxx':         'sourcecode.cpp.h',
        'icns':        'image.icns',
        'java':        'sourcecode.java',
        'js':          'sourcecode.javascript',
        'kext':        'wrapper.kext',
        'm':           'sourcecode.c.objc',
        'mm':          'sourcecode.cpp.objcpp',
        'nib':         'wrapper.nib',
        'o':           'compiled.mach-o.objfile',
        'pdf':         'image.pdf',
        'pl':          'text.script.perl',
        'plist':       'text.plist.xml',
        'pm':          'text.script.perl',
        'png':         'image.png',
        'py':          'text.script.python',
        'r':           'sourcecode.rez',
        'rez':         'sourcecode.rez',
        's':           'sourcecode.asm',
        'storyboard':  'file.storyboard',
        'strings':     'text.plist.strings',
        'swift':       'sourcecode.swift',
        'ttf':         'file',
        'xcassets':    'folder.assetcatalog',
        'xcconfig':    'text.xcconfig',
        'xcdatamodel': 'wrapper.xcdatamodel',
        'xcdatamodeld':'wrapper.xcdatamodeld',
        'xib':         'file.xib',
        'y':           'sourcecode.yacc',
      }

      # Extensions whose inferred type is recorded in 'explicitFileType'
      # rather than the usual 'lastKnownFileType'.
      prop_map = {
        'dart': 'explicitFileType',
        'gyp':  'explicitFileType',
        'gypi': 'explicitFileType',
      }

      if is_dir:
        # A trailing slash earlier marked this reference as a directory.
        file_type = 'folder'
        prop_name = 'lastKnownFileType'
      else:
        basename = posixpath.basename(self._properties['path'])
        (root, ext) = posixpath.splitext(basename)
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and fall
        # back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = extension_map.get(ext, 'text')
        prop_name = prop_map.get(ext, 'lastKnownFileType')

      self._properties[prop_name] = file_type


class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations."""
  # No additions to the schema relative to PBXGroup.
  pass


# PBXReferenceProxy is also an XCFileLikeElement subclass.  It is defined below
# because it uses PBXContainerItemProxy, defined below.


class XCBuildConfiguration(XCObject):
  # A single named build configuration (e.g. "Debug", "Release") holding a
  # 'buildSettings' dict and an optional base .xcconfig file reference.
  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings':              [0, dict, 0, 1, {}],
    'name':                       [0, str,  0, 1],
  })

  def HasBuildSetting(self, key):
    # Returns whether key is present in this configuration's build settings.
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    # Raises KeyError if key is not a known build setting.
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    # TODO(mark): If a list, copy?
    self._properties['buildSettings'][key] = value

  def AppendBuildSetting(self, key, value):
    # Treats the setting as a list, creating it if absent.
    if not key in self._properties['buildSettings']:
      self._properties['buildSettings'][key] = []
    self._properties['buildSettings'][key].append(value)

  def DelBuildSetting(self, key):
    # Deleting a setting that isn't present is not an error.
    if key in self._properties['buildSettings']:
      del self._properties['buildSettings'][key]

  def SetBaseConfiguration(self, value):
    self._properties['baseConfigurationReference'] = value


class XCConfigurationList(XCObject):
  # _configs is the default list of configurations.
  # Default pair of configurations, used when a project doesn't supply its
  # own 'buildConfigurations' value (see the _schema default below).
  _configs = [ XCBuildConfiguration({'name': 'Debug'}),
               XCBuildConfiguration({'name': 'Release'}) ]

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildConfigurations':           [1, XCBuildConfiguration, 1, 1, _configs],
    'defaultConfigurationIsVisible': [0, int,                  0, 1, 1],
    'defaultConfigurationName':      [0, str,                  0, 1, 'Release'],
  })

  def Name(self):
    # Mirrors the comment string that Xcode itself writes for configuration
    # lists: 'Build configuration list for <parent class> "<parent name>"'.
    return 'Build configuration list for ' + \
           self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'

  def ConfigurationNamed(self, name):
    """Convenience accessor to obtain an XCBuildConfiguration by name."""
    for configuration in self._properties['buildConfigurations']:
      if configuration._properties['name'] == name:
        return configuration

    raise KeyError(name)

  def DefaultConfiguration(self):
    """Convenience accessor to obtain the default XCBuildConfiguration."""
    return self.ConfigurationNamed(self._properties['defaultConfigurationName'])

  def HasBuildSetting(self, key):
    """Determines the state of a build setting in all XCBuildConfiguration
    child objects.

    If all child objects have key in their build settings, and the value is the
    same in all child objects, returns 1.

    If no child objects have the key in their build settings, returns 0.

    If some, but not all, child objects have the key in their build settings,
    or if any children have different values for the key, returns -1.
    """
    has = None
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_has = configuration.HasBuildSetting(key)
      if has is None:
        has = configuration_has
      elif has != configuration_has:
        # Mixed presence across configurations.
        return -1

      if configuration_has:
        configuration_value = configuration.GetBuildSetting(key)
        if value is None:
          value = configuration_value
        elif value != configuration_value:
          # Present in every configuration so far, but with differing values.
          return -1

    if not has:
      return 0

    return 1

  def GetBuildSetting(self, key):
    """Gets the build setting for key.

    All child XCConfiguration objects must have the same value set for the
    setting, or a ValueError will be raised.
    """
    # TODO(mark): This is wrong for build settings that are lists.  The list
    # contents should be compared (and a list copy returned?)

    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_value = configuration.GetBuildSetting(key)
      if value is None:
        value = configuration_value
      else:
        if value != configuration_value:
          raise ValueError('Variant values for ' + key)

    return value

  def SetBuildSetting(self, key, value):
    """Sets the build setting for key to value in all child
    XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    """Appends value to the build setting for key, which is treated as a list,
    in all child XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    """Deletes the build setting key from all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.DelBuildSetting(key)

  def SetBaseConfiguration(self, value):
    """Sets the build configuration in all child XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBaseConfiguration(value)


class PBXBuildFile(XCObject):
  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef':  [0, XCFileLikeElement, 0, 1],
    'settings': [0, str,               0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    # Example: "main.cc in Sources"
    return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # It is not sufficient to just rely on Name() to get the
    # XCFileLikeElement's name, because that is not a complete pathname.
    # PathHashables returns hashables unique enough that no two
    # PBXBuildFiles should wind up with the same set of hashables, unless
    # someone adds the same file multiple times to the same target.  That
    # would be considered invalid anyway.
    hashables.extend(self._properties['fileRef'].PathHashables())

    return hashables


class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
      to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildActionMask':                    [0, int,          0, 1, 0x7fffffff],
    'files':                              [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int,          0, 1, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the phase and indexes any pre-supplied 'files' children
    into the lookup dicts described in the class docstring.
    """
    # super
    XCObject.__init__(self, properties, id, parent)
    self._files_by_path = {}
    self._files_by_xcfilelikeelement = {}
    for pbxbuildfile in self._properties.get('files', []):
      self._AddBuildFileToDicts(pbxbuildfile)

  def FileGroup(self, path):
    # Subclasses must override this by returning a two-element tuple.  The
    # first item in the tuple should be the PBXGroup to which "path" should be
    # added, either as a child or deeper descendant.  The second item should
    # be a boolean indicating whether files should be added into hierarchical
    # groups or one single flat group.
    raise NotImplementedError(
        self.__class__.__name__ + ' must implement FileGroup')

  def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an exception.
    """
    if path in self._files_by_path:
      raise ValueError('Found multiple build files with path ' + path)
    self._files_by_path[path] = pbxbuildfile

  def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
    """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

    If path is specified, then it is the path that is being added to the
    phase, and pbxbuildfile must contain either a PBXFileReference directly
    referencing that path, or it must contain a PBXVariantGroup that itself
    contains a PBXFileReference referencing the path.

    If path is not specified, either the PBXFileReference's path or the
    paths of all children of the PBXVariantGroup are taken as being added to
    the phase.

    If the path is already present in the phase, raises an exception.

    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    are already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappear and are referenced by the
    same PBXBuildFile that has already introduced them, because in the case
    of PBXVariantGroup objects, they may correspond to multiple paths that are
    not all added simultaneously.  When this situation occurs, the path needs
    to be added to _files_by_path, but nothing needs to change in
    _files_by_xcfilelikeelement, and the caller should have avoided adding
    the PBXBuildFile if it is already present in the list of children.
    """
    xcfilelikeelement = pbxbuildfile._properties['fileRef']

    paths = []
    if path != None:
      # It's best when the caller provides the path.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        paths.append(path)
    else:
      # If the caller didn't provide a path, there can be either multiple
      # paths (PBXVariantGroup) or one.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        for variant in xcfilelikeelement._properties['children']:
          paths.append(variant.FullPath())
      else:
        paths.append(xcfilelikeelement.FullPath())

    # Add the paths first, because if something's going to raise, the
    # messages provided by _AddPathToDict are more useful owing to its
    # having access to a real pathname and not just an object's Name().
    for a_path in paths:
      self._AddPathToDict(pbxbuildfile, a_path)

    # If another PBXBuildFile references this XCFileLikeElement, there's a
    # problem.
    if xcfilelikeelement in self._files_by_xcfilelikeelement and \
       self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
      raise ValueError('Found multiple build files for ' + \
                       xcfilelikeelement.Name())
    self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile

  def AppendBuildFile(self, pbxbuildfile, path=None):
    # Callers should use this instead of calling
    # AppendProperty('files', pbxbuildfile) directly because this function
    # maintains the object's dicts.  Better yet, callers can just call AddFile
    # with a pathname and not worry about building their own PBXBuildFile
    # objects.
    self.AppendProperty('files', pbxbuildfile)
    self._AddBuildFileToDicts(pbxbuildfile, path)

  def AddFile(self, path, settings=None):
    """Adds the file at path to this build phase, creating or reusing the
    PBXFileReference (or PBXVariantGroup) and PBXBuildFile objects as needed.
    """
    (file_group, hierarchical) = self.FileGroup(path)
    file_ref = file_group.AddOrGetFileByPath(path, hierarchical)

    if file_ref in self._files_by_xcfilelikeelement and \
       isinstance(file_ref, PBXVariantGroup):
      # There's already a PBXBuildFile in this phase corresponding to the
      # PBXVariantGroup.  path just provides a new variant that belongs to
      # the group.  Add the path to the dict.
      pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
      self._AddBuildFileToDicts(pbxbuildfile, path)
    else:
      # Add a new PBXBuildFile to get file_ref into the phase.
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref,
                                     'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)


class PBXHeadersBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Headers'

  def FileGroup(self, path):
    # Defers to the owning PBXProject to pick the group for this path.
    return self.PBXProjectAncestor().RootGroupForPath(path)


class PBXResourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Resources'

  def FileGroup(self, path):
    # Defers to the owning PBXProject to pick the group for this path.
    return self.PBXProjectAncestor().RootGroupForPath(path)


class PBXSourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Sources'

  def FileGroup(self, path):
    # Defers to the owning PBXProject to pick the group for this path.
    return self.PBXProjectAncestor().RootGroupForPath(path)


class PBXFrameworksBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Frameworks'

  def FileGroup(self, path):
    (root, ext) = posixpath.splitext(path)
    if ext != '':
      ext = ext[1:].lower()
    if ext == 'o':
      # .o files are added to Xcode Frameworks phases, but conceptually aren't
      # frameworks, they're more like sources or intermediates.  Redirect them
      # to show up in one of those other groups.
      return self.PBXProjectAncestor().RootGroupForPath(path)
    else:
      # Real frameworks/libraries go into the project's Frameworks group,
      # flat (hierarchical=False).
      return (self.PBXProjectAncestor().FrameworksGroup(), False)


class PBXShellScriptBuildPhase(XCBuildPhase):
  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths':       [1, str, 0, 1, []],
    'name':             [0, str, 0, 0],
    'outputPaths':      [1, str, 0, 1, []],
    'shellPath':        [0, str, 0, 1, '/bin/sh'],
    'shellScript':      [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    # Prefer the user-supplied phase name; fall back to Xcode's default.
    if 'name' in self._properties:
      return self._properties['name']

    return 'ShellScript'


class PBXCopyFilesBuildPhase(XCBuildPhase):
  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath':          [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name':             [0, str, 0, 0],
  })

  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')

  # path_tree_to_subfolder maps names of Xcode variables to the associated
  # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_FRAMEWORKS_DIR': 10,  # Frameworks Directory
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    # Prefer the user-supplied phase name; fall back to Xcode's default.
    if 'name' in self._properties:
      return self._properties['name']

    return 'CopyFiles'

  def FileGroup(self, path):
    # Defers to the owning PBXProject to pick the group for this path.
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Set the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for
    XCHierarchicalElements, specifically, "$(DIR)/path".
    """
    path_tree_match = self.path_tree_re.search(path)
    if path_tree_match:
      # Everything else needs to be relative to an Xcode variable.
      path_tree = path_tree_match.group(1)
      relative_path = path_tree_match.group(3)

      if path_tree in self.path_tree_to_subfolder:
        subfolder = self.path_tree_to_subfolder[path_tree]
        if relative_path is None:
          # Bare "$(DIR)" with no path component maps to an empty dstPath.
          relative_path = ''
      else:
        # The path starts with an unrecognized Xcode variable
        # name like $(SRCROOT).  Xcode will still handle this
        # as an "absolute path" that starts with the variable.
        subfolder = 0
        relative_path = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      relative_path = path[1:]
    else:
      raise ValueError('Can\'t use path %s in a %s' % \
                       (path, self.__class__.__name__))

    self._properties['dstPath'] = relative_path
    self._properties['dstSubfolderSpec'] = subfolder


class PBXBuildRule(XCObject):
  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType':     [0, str, 0, 1],
    'isEditable':   [0, int, 0, 1, 1],
    'outputFiles':  [1, str, 0, 1, []],
    'script':       [0, str, 0, 0],
  })

  def Name(self):
    # Not very inspired, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Use the hashables of the weak objects that this object refers to.
    hashables.append(self._properties['fileType'])
    if 'filePatterns' in self._properties:
      hashables.append(self._properties['filePatterns'])

    return hashables


class PBXContainerItemProxy(XCObject):
  # When referencing an item in this project file, containerPortal is the
  # PBXProject root object of this project file.  When referencing an item in
  # another project file, containerPortal is a PBXFileReference identifying
  # the other project file.
  #
  # When serving as a proxy to an XCTarget (in this project file or another),
  # proxyType is 1.  When serving as a proxy to a PBXFileReference (in another
  # project file), proxyType is 2.  Type 2 is used for references to the
  # products of the other project file's targets.
  #
  # Xcode is weird about remoteGlobalIDString.  Usually, it's printed without
  # a comment, indicating that it's tracked internally simply as a string, but
  # sometimes it's printed with a comment (usually when the object is initially
  # created), indicating that it's tracked as a project file object at least
  # sometimes.  This module always tracks it as an object, but contains a hack
  # to prevent it from printing the comment in the project file output.  See
  # _XCKVPrint.
  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal':      [0, XCContainerPortal, 0, 1],
    'proxyType':            [0, int,               0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject,    0, 1],
    'remoteInfo':           [0, str,               0, 1],
  })

  def __repr__(self):
    props = self._properties
    name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Use the hashables of the weak objects that this object refers to.
    hashables.extend(self._properties['containerPortal'].Hashables())
    hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
    return hashables


class PBXTargetDependency(XCObject):
  # The "target" property accepts an XCTarget object, and obviously not
  # NoneType.  But XCTarget is defined below, so it can't be put into the
  # schema yet.  The definition of PBXTargetDependency can't be moved below
  # XCTarget because XCTarget's own schema references PBXTargetDependency.
  # Python doesn't deal well with this circular relationship, and doesn't have
  # a real way to do forward declarations.  To work around, the type of
  # the "target" property is reset below, after XCTarget is defined.
  #
  # At least one of "name" and "target" is required.
_schema = XCObject._schema.copy() _schema.update({ 'name': [0, str, 0, 0], 'target': [0, None.__class__, 0, 0], 'targetProxy': [0, PBXContainerItemProxy, 1, 1], }) def __repr__(self): name = self._properties.get('name') or self._properties['target'].Name() return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) def Name(self): # Admittedly not the best name, but it's what Xcode uses. return self.__class__.__name__ def Hashables(self): # super hashables = XCObject.Hashables(self) # Use the hashables of the weak objects that this object refers to. hashables.extend(self._properties['targetProxy'].Hashables()) return hashables class PBXReferenceProxy(XCFileLikeElement): _schema = XCFileLikeElement._schema.copy() _schema.update({ 'fileType': [0, str, 0, 1], 'path': [0, str, 0, 1], 'remoteRef': [0, PBXContainerItemProxy, 1, 1], }) class XCTarget(XCRemoteObject): # An XCTarget is really just an XCObject, the XCRemoteObject thing is just # to allow PBXProject to be used in the remoteGlobalIDString property of # PBXContainerItemProxy. # # Setting a "name" property at instantiation may also affect "productName", # which may in turn affect the "PRODUCT_NAME" build setting in children of # "buildConfigurationList". See __init__ below. _schema = XCRemoteObject._schema.copy() _schema.update({ 'buildConfigurationList': [0, XCConfigurationList, 1, 1, XCConfigurationList()], 'buildPhases': [1, XCBuildPhase, 1, 1, []], 'dependencies': [1, PBXTargetDependency, 1, 1, []], 'name': [0, str, 0, 1], 'productName': [0, str, 0, 1], }) def __init__(self, properties=None, id=None, parent=None, force_outdir=None, force_prefix=None, force_extension=None): # super XCRemoteObject.__init__(self, properties, id, parent) # Set up additional defaults not expressed in the schema. If a "name" # property was supplied, set "productName" if it is not present. Also set # the "PRODUCT_NAME" build setting in each configuration, but only if # the setting is not present in any build configuration. 
if 'name' in self._properties: if not 'productName' in self._properties: self.SetProperty('productName', self._properties['name']) if 'productName' in self._properties: if 'buildConfigurationList' in self._properties: configs = self._properties['buildConfigurationList'] if configs.HasBuildSetting('PRODUCT_NAME') == 0: configs.SetBuildSetting('PRODUCT_NAME', self._properties['productName']) def AddDependency(self, other): pbxproject = self.PBXProjectAncestor() other_pbxproject = other.PBXProjectAncestor() if pbxproject == other_pbxproject: # Add a dependency to another target in the same project file. container = PBXContainerItemProxy({'containerPortal': pbxproject, 'proxyType': 1, 'remoteGlobalIDString': other, 'remoteInfo': other.Name()}) dependency = PBXTargetDependency({'target': other, 'targetProxy': container}) self.AppendProperty('dependencies', dependency) else: # Add a dependency to a target in a different project file. other_project_ref = \ pbxproject.AddOrGetProjectReference(other_pbxproject)[1] container = PBXContainerItemProxy({ 'containerPortal': other_project_ref, 'proxyType': 1, 'remoteGlobalIDString': other, 'remoteInfo': other.Name(), }) dependency = PBXTargetDependency({'name': other.Name(), 'targetProxy': container}) self.AppendProperty('dependencies', dependency) # Proxy all of these through to the build configuration list. 
def ConfigurationNamed(self, name): return self._properties['buildConfigurationList'].ConfigurationNamed(name) def DefaultConfiguration(self): return self._properties['buildConfigurationList'].DefaultConfiguration() def HasBuildSetting(self, key): return self._properties['buildConfigurationList'].HasBuildSetting(key) def GetBuildSetting(self, key): return self._properties['buildConfigurationList'].GetBuildSetting(key) def SetBuildSetting(self, key, value): return self._properties['buildConfigurationList'].SetBuildSetting(key, \ value) def AppendBuildSetting(self, key, value): return self._properties['buildConfigurationList'].AppendBuildSetting(key, \ value) def DelBuildSetting(self, key): return self._properties['buildConfigurationList'].DelBuildSetting(key) # Redefine the type of the "target" property. See PBXTargetDependency._schema # above. PBXTargetDependency._schema['target'][1] = XCTarget class PBXNativeTarget(XCTarget): # buildPhases is overridden in the schema to be able to set defaults. # # NOTE: Contrary to most objects, it is advisable to set parent when # constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject # object. A parent reference is required for a PBXNativeTarget during # construction to be able to set up the target defaults for productReference, # because a PBXBuildFile object must be created for the target and it must # be added to the PBXProject's mainGroup hierarchy. _schema = XCTarget._schema.copy() _schema.update({ 'buildPhases': [1, XCBuildPhase, 1, 1, [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]], 'buildRules': [1, PBXBuildRule, 1, 1, []], 'productReference': [0, PBXFileReference, 0, 1], 'productType': [0, str, 0, 1], }) # Mapping from Xcode product-types to settings. 
The settings are: # filetype : used for explicitFileType in the project file # prefix : the prefix for the file name # suffix : the suffix for the file name _product_filetypes = { 'com.apple.product-type.application': ['wrapper.application', '', '.app'], 'com.apple.product-type.application.watchapp': ['wrapper.application', '', '.app'], 'com.apple.product-type.watchkit-extension': ['wrapper.app-extension', '', '.appex'], 'com.apple.product-type.app-extension': ['wrapper.app-extension', '', '.appex'], 'com.apple.product-type.bundle': ['wrapper.cfbundle', '', '.bundle'], 'com.apple.product-type.framework': ['wrapper.framework', '', '.framework'], 'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib', 'lib', '.dylib'], 'com.apple.product-type.library.static': ['archive.ar', 'lib', '.a'], 'com.apple.product-type.tool': ['compiled.mach-o.executable', '', ''], 'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle', '', '.xctest'], 'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib', '', '.so'], 'com.apple.product-type.kernel-extension': ['wrapper.kext', '', '.kext'], } def __init__(self, properties=None, id=None, parent=None, force_outdir=None, force_prefix=None, force_extension=None): # super XCTarget.__init__(self, properties, id, parent) if 'productName' in self._properties and \ 'productType' in self._properties and \ not 'productReference' in self._properties and \ self._properties['productType'] in self._product_filetypes: products_group = None pbxproject = self.PBXProjectAncestor() if pbxproject != None: products_group = pbxproject.ProductsGroup() if products_group != None: (filetype, prefix, suffix) = \ self._product_filetypes[self._properties['productType']] # Xcode does not have a distinct type for loadable modules that are # pure BSD targets (not in a bundle wrapper). GYP allows such modules # to be specified by setting a target type to loadable_module without # having mac_bundle set. 
These are mapped to the pseudo-product type # com.googlecode.gyp.xcode.bundle. # # By picking up this special type and converting it to a dynamic # library (com.apple.product-type.library.dynamic) with fix-ups, # single-file loadable modules can be produced. # # MACH_O_TYPE is changed to mh_bundle to produce the proper file type # (as opposed to mh_dylib). In order for linking to succeed, # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be # cleared. They are meaningless for type mh_bundle. # # Finally, the .so extension is forcibly applied over the default # (.dylib), unless another forced extension is already selected. # .dylib is plainly wrong, and .bundle is used by loadable_modules in # bundle wrappers (com.apple.product-type.bundle). .so seems an odd # choice because it's used as the extension on many other systems that # don't distinguish between linkable shared libraries and non-linkable # loadable modules, but there's precedent: Python loadable modules on # Mac OS X use an .so extension. if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle': self._properties['productType'] = \ 'com.apple.product-type.library.dynamic' self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle') self.SetBuildSetting('DYLIB_CURRENT_VERSION', '') self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '') if force_extension is None: force_extension = suffix[1:] if self._properties['productType'] == \ 'com.apple.product-type-bundle.unit.test': if force_extension is None: force_extension = suffix[1:] if force_extension is not None: # If it's a wrapper (bundle), set WRAPPER_EXTENSION. # Extension override. suffix = '.' 
+ force_extension if filetype.startswith('wrapper.'): self.SetBuildSetting('WRAPPER_EXTENSION', force_extension) else: self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension) if filetype.startswith('compiled.mach-o.executable'): product_name = self._properties['productName'] product_name += suffix suffix = '' self.SetProperty('productName', product_name) self.SetBuildSetting('PRODUCT_NAME', product_name) # Xcode handles most prefixes based on the target type, however there # are exceptions. If a "BSD Dynamic Library" target is added in the # Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that # behavior. if force_prefix is not None: prefix = force_prefix if filetype.startswith('wrapper.'): self.SetBuildSetting('WRAPPER_PREFIX', prefix) else: self.SetBuildSetting('EXECUTABLE_PREFIX', prefix) if force_outdir is not None: self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir) # TODO(tvl): Remove the below hack. # http://code.google.com/p/gyp/issues/detail?id=122 # Some targets include the prefix in the target_name. These targets # really should just add a product_name setting that doesn't include # the prefix. For example: # target_name = 'libevent', product_name = 'event' # This check cleans up for them. 
product_name = self._properties['productName'] prefix_len = len(prefix) if prefix_len and (product_name[:prefix_len] == prefix): product_name = product_name[prefix_len:] self.SetProperty('productName', product_name) self.SetBuildSetting('PRODUCT_NAME', product_name) ref_props = { 'explicitFileType': filetype, 'includeInIndex': 0, 'path': prefix + product_name + suffix, 'sourceTree': 'BUILT_PRODUCTS_DIR', } file_ref = PBXFileReference(ref_props) products_group.AppendChild(file_ref) self.SetProperty('productReference', file_ref) def GetBuildPhaseByType(self, type): if not 'buildPhases' in self._properties: return None the_phase = None for phase in self._properties['buildPhases']: if isinstance(phase, type): # Some phases may be present in multiples in a well-formed project file, # but phases like PBXSourcesBuildPhase may only be present singly, and # this function is intended as an aid to GetBuildPhaseByType. Loop # over the entire list of phases and assert if more than one of the # desired type is found. assert the_phase is None the_phase = phase return the_phase def HeadersPhase(self): headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase) if headers_phase is None: headers_phase = PBXHeadersBuildPhase() # The headers phase should come before the resources, sources, and # frameworks phases, if any. 
insert_at = len(self._properties['buildPhases']) for index in xrange(0, len(self._properties['buildPhases'])): phase = self._properties['buildPhases'][index] if isinstance(phase, PBXResourcesBuildPhase) or \ isinstance(phase, PBXSourcesBuildPhase) or \ isinstance(phase, PBXFrameworksBuildPhase): insert_at = index break self._properties['buildPhases'].insert(insert_at, headers_phase) headers_phase.parent = self return headers_phase def ResourcesPhase(self): resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase) if resources_phase is None: resources_phase = PBXResourcesBuildPhase() # The resources phase should come before the sources and frameworks # phases, if any. insert_at = len(self._properties['buildPhases']) for index in xrange(0, len(self._properties['buildPhases'])): phase = self._properties['buildPhases'][index] if isinstance(phase, PBXSourcesBuildPhase) or \ isinstance(phase, PBXFrameworksBuildPhase): insert_at = index break self._properties['buildPhases'].insert(insert_at, resources_phase) resources_phase.parent = self return resources_phase def SourcesPhase(self): sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase) if sources_phase is None: sources_phase = PBXSourcesBuildPhase() self.AppendProperty('buildPhases', sources_phase) return sources_phase def FrameworksPhase(self): frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase) if frameworks_phase is None: frameworks_phase = PBXFrameworksBuildPhase() self.AppendProperty('buildPhases', frameworks_phase) return frameworks_phase def AddDependency(self, other): # super XCTarget.AddDependency(self, other) static_library_type = 'com.apple.product-type.library.static' shared_library_type = 'com.apple.product-type.library.dynamic' framework_type = 'com.apple.product-type.framework' if isinstance(other, PBXNativeTarget) and \ 'productType' in self._properties and \ self._properties['productType'] != static_library_type and \ 'productType' in other._properties and \ 
(other._properties['productType'] == static_library_type or \ ((other._properties['productType'] == shared_library_type or \ other._properties['productType'] == framework_type) and \ ((not other.HasBuildSetting('MACH_O_TYPE')) or other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))): file_ref = other.GetProperty('productReference') pbxproject = self.PBXProjectAncestor() other_pbxproject = other.PBXProjectAncestor() if pbxproject != other_pbxproject: other_project_product_group = \ pbxproject.AddOrGetProjectReference(other_pbxproject)[0] file_ref = other_project_product_group.GetChildByRemoteObject(file_ref) self.FrameworksPhase().AppendProperty('files', PBXBuildFile({'fileRef': file_ref})) class PBXAggregateTarget(XCTarget): pass class PBXProject(XCContainerPortal): # A PBXProject is really just an XCObject, the XCContainerPortal thing is # just to allow PBXProject to be used in the containerPortal property of # PBXContainerItemProxy. """ Attributes: path: "sample.xcodeproj". TODO(mark) Document me! _other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each value is a reference to the dict in the projectReferences list associated with the keyed PBXProject. 
""" _schema = XCContainerPortal._schema.copy() _schema.update({ 'attributes': [0, dict, 0, 0], 'buildConfigurationList': [0, XCConfigurationList, 1, 1, XCConfigurationList()], 'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'], 'hasScannedForEncodings': [0, int, 0, 1, 1], 'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()], 'projectDirPath': [0, str, 0, 1, ''], 'projectReferences': [1, dict, 0, 0], 'projectRoot': [0, str, 0, 1, ''], 'targets': [1, XCTarget, 1, 1, []], }) def __init__(self, properties=None, id=None, parent=None, path=None): self.path = path self._other_pbxprojects = {} # super return XCContainerPortal.__init__(self, properties, id, parent) def Name(self): name = self.path if name[-10:] == '.xcodeproj': name = name[:-10] return posixpath.basename(name) def Path(self): return self.path def Comment(self): return 'Project object' def Children(self): # super children = XCContainerPortal.Children(self) # Add children that the schema doesn't know about. Maybe there's a more # elegant way around this, but this is the only case where we need to own # objects in a dictionary (that is itself in a list), and three lines for # a one-off isn't that big a deal. if 'projectReferences' in self._properties: for reference in self._properties['projectReferences']: children.append(reference['ProductGroup']) return children def PBXProjectAncestor(self): return self def _GroupByName(self, name): if not 'mainGroup' in self._properties: self.SetProperty('mainGroup', PBXGroup()) main_group = self._properties['mainGroup'] group = main_group.GetChildByName(name) if group is None: group = PBXGroup({'name': name}) main_group.AppendChild(group) return group # SourceGroup and ProductsGroup are created by default in Xcode's own # templates. 
def SourceGroup(self): return self._GroupByName('Source') def ProductsGroup(self): return self._GroupByName('Products') # IntermediatesGroup is used to collect source-like files that are generated # by rules or script phases and are placed in intermediate directories such # as DerivedSources. def IntermediatesGroup(self): return self._GroupByName('Intermediates') # FrameworksGroup and ProjectsGroup are top-level groups used to collect # frameworks and projects. def FrameworksGroup(self): return self._GroupByName('Frameworks') def ProjectsGroup(self): return self._GroupByName('Projects') def RootGroupForPath(self, path): """Returns a PBXGroup child of this object to which path should be added. This method is intended to choose between SourceGroup and IntermediatesGroup on the basis of whether path is present in a source directory or an intermediates directory. For the purposes of this determination, any path located within a derived file directory such as PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates directory. The returned value is a two-element tuple. The first element is the PBXGroup, and the second element specifies whether that group should be organized hierarchically (True) or as a single flat list (False). """ # TODO(mark): make this a class variable and bind to self on call? # Also, this list is nowhere near exhaustive. # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by # gyp.generator.xcode. There should probably be some way for that module # to push the names in, rather than having to hard-code them here. 
source_tree_groups = { 'DERIVED_FILE_DIR': (self.IntermediatesGroup, True), 'INTERMEDIATE_DIR': (self.IntermediatesGroup, True), 'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True), 'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True), } (source_tree, path) = SourceTreeAndPathFromPath(path) if source_tree != None and source_tree in source_tree_groups: (group_func, hierarchical) = source_tree_groups[source_tree] group = group_func() return (group, hierarchical) # TODO(mark): make additional choices based on file extension. return (self.SourceGroup(), True) def AddOrGetFileInRootGroup(self, path): """Returns a PBXFileReference corresponding to path in the correct group according to RootGroupForPath's heuristics. If an existing PBXFileReference for path exists, it will be returned. Otherwise, one will be created and returned. """ (group, hierarchical) = self.RootGroupForPath(path) return group.AddOrGetFileByPath(path, hierarchical) def RootGroupsTakeOverOnlyChildren(self, recurse=False): """Calls TakeOverOnlyChild for all groups in the main group.""" for group in self._properties['mainGroup']._properties['children']: if isinstance(group, PBXGroup): group.TakeOverOnlyChild(recurse) def SortGroups(self): # Sort the children of the mainGroup (like "Source" and "Products") # according to their defined order. self._properties['mainGroup']._properties['children'] = \ sorted(self._properties['mainGroup']._properties['children'], cmp=lambda x,y: x.CompareRootGroup(y)) # Sort everything else by putting group before files, and going # alphabetically by name within sections of groups and files. SortGroup # is recursive. for group in self._properties['mainGroup']._properties['children']: if not isinstance(group, PBXGroup): continue if group.Name() == 'Products': # The Products group is a special case. Instead of sorting # alphabetically, sort things in the order of the targets that # produce the products. 
To do this, just build up a new list of # products based on the targets. products = [] for target in self._properties['targets']: if not isinstance(target, PBXNativeTarget): continue product = target._properties['productReference'] # Make sure that the product is already in the products group. assert product in group._properties['children'] products.append(product) # Make sure that this process doesn't miss anything that was already # in the products group. assert len(products) == len(group._properties['children']) group._properties['children'] = products else: group.SortGroup() def AddOrGetProjectReference(self, other_pbxproject): """Add a reference to another project file (via PBXProject object) to this one. Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in this project file that contains a PBXReferenceProxy object for each product of each PBXNativeTarget in the other project file. ProjectRef is a PBXFileReference to the other project file. If this project file already references the other project file, the existing ProductGroup and ProjectRef are returned. The ProductGroup will still be updated if necessary. """ if not 'projectReferences' in self._properties: self._properties['projectReferences'] = [] product_group = None project_ref = None if not other_pbxproject in self._other_pbxprojects: # This project file isn't yet linked to the other one. Establish the # link. product_group = PBXGroup({'name': 'Products'}) # ProductGroup is strong. product_group.parent = self # There's nothing unique about this PBXGroup, and if left alone, it will # wind up with the same set of hashables as all other PBXGroup objects # owned by the projectReferences list. Add the hashables of the # remote PBXProject that it's related to. product_group._hashables.extend(other_pbxproject.Hashables()) # The other project reports its path as relative to the same directory # that this project's path is relative to. 
The other project's path # is not necessarily already relative to this project. Figure out the # pathname that this project needs to use to refer to the other one. this_path = posixpath.dirname(self.Path()) projectDirPath = self.GetProperty('projectDirPath') if projectDirPath: if posixpath.isabs(projectDirPath[0]): this_path = projectDirPath else: this_path = posixpath.join(this_path, projectDirPath) other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path) # ProjectRef is weak (it's owned by the mainGroup hierarchy). project_ref = PBXFileReference({ 'lastKnownFileType': 'wrapper.pb-project', 'path': other_path, 'sourceTree': 'SOURCE_ROOT', }) self.ProjectsGroup().AppendChild(project_ref) ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref} self._other_pbxprojects[other_pbxproject] = ref_dict self.AppendProperty('projectReferences', ref_dict) # Xcode seems to sort this list case-insensitively self._properties['projectReferences'] = \ sorted(self._properties['projectReferences'], cmp=lambda x,y: cmp(x['ProjectRef'].Name().lower(), y['ProjectRef'].Name().lower())) else: # The link already exists. Pull out the relevnt data. project_ref_dict = self._other_pbxprojects[other_pbxproject] product_group = project_ref_dict['ProductGroup'] project_ref = project_ref_dict['ProjectRef'] self._SetUpProductReferences(other_pbxproject, product_group, project_ref) inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False) targets = other_pbxproject.GetProperty('targets') if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets): dir_path = project_ref._properties['path'] product_group._hashables.extend(dir_path) return [product_group, project_ref] def _AllSymrootsUnique(self, target, inherit_unique_symroot): # Returns True if all configurations have a unique 'SYMROOT' attribute. 
# The value of inherit_unique_symroot decides, if a configuration is assumed # to inherit a unique 'SYMROOT' attribute from its parent, if it doesn't # define an explicit value for 'SYMROOT'. symroots = self._DefinedSymroots(target) for s in self._DefinedSymroots(target): if (s is not None and not self._IsUniqueSymrootForTarget(s) or s is None and not inherit_unique_symroot): return False return True if symroots else inherit_unique_symroot def _DefinedSymroots(self, target): # Returns all values for the 'SYMROOT' attribute defined in all # configurations for this target. If any configuration doesn't define the # 'SYMROOT' attribute, None is added to the returned set. If all # configurations don't define the 'SYMROOT' attribute, an empty set is # returned. config_list = target.GetProperty('buildConfigurationList') symroots = set() for config in config_list.GetProperty('buildConfigurations'): setting = config.GetProperty('buildSettings') if 'SYMROOT' in setting: symroots.add(setting['SYMROOT']) else: symroots.add(None) if len(symroots) == 1 and None in symroots: return set() return symroots def _IsUniqueSymrootForTarget(self, symroot): # This method returns True if all configurations in target contain a # 'SYMROOT' attribute that is unique for the given target. A value is # unique, if the Xcode macro '$SRCROOT' appears in it in any form. uniquifier = ['$SRCROOT', '$(SRCROOT)'] if any(x in symroot for x in uniquifier): return True return False def _SetUpProductReferences(self, other_pbxproject, product_group, project_ref): # TODO(mark): This only adds references to products in other_pbxproject # when they don't exist in this pbxproject. Perhaps it should also # remove references from this pbxproject that are no longer present in # other_pbxproject. Perhaps it should update various properties if they # change. 
for target in other_pbxproject._properties['targets']: if not isinstance(target, PBXNativeTarget): continue other_fileref = target._properties['productReference'] if product_group.GetChildByRemoteObject(other_fileref) is None: # Xcode sets remoteInfo to the name of the target and not the name # of its product, despite this proxy being a reference to the product. container_item = PBXContainerItemProxy({ 'containerPortal': project_ref, 'proxyType': 2, 'remoteGlobalIDString': other_fileref, 'remoteInfo': target.Name() }) # TODO(mark): Does sourceTree get copied straight over from the other # project? Can the other project ever have lastKnownFileType here # instead of explicitFileType? (Use it if so?) Can path ever be # unset? (I don't think so.) Can other_fileref have name set, and # does it impact the PBXReferenceProxy if so? These are the questions # that perhaps will be answered one day. reference_proxy = PBXReferenceProxy({ 'fileType': other_fileref._properties['explicitFileType'], 'path': other_fileref._properties['path'], 'sourceTree': other_fileref._properties['sourceTree'], 'remoteRef': container_item, }) product_group.AppendChild(reference_proxy) def SortRemoteProductReferences(self): # For each remote project file, sort the associated ProductGroup in the # same order that the targets are sorted in the remote project file. This # is the sort order used by Xcode. def CompareProducts(x, y, remote_products): # x and y are PBXReferenceProxy objects. Go through their associated # PBXContainerItem to get the remote PBXFileReference, which will be # present in the remote_products list. x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString'] y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString'] x_index = remote_products.index(x_remote) y_index = remote_products.index(y_remote) # Use the order of each remote PBXFileReference in remote_products to # determine the sort order. 
return cmp(x_index, y_index) for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems(): # Build up a list of products in the remote project file, ordered the # same as the targets that produce them. remote_products = [] for target in other_pbxproject._properties['targets']: if not isinstance(target, PBXNativeTarget): continue remote_products.append(target._properties['productReference']) # Sort the PBXReferenceProxy children according to the list of remote # products. product_group = ref_dict['ProductGroup'] product_group._properties['children'] = sorted( product_group._properties['children'], cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp)) class XCProjectFile(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'archiveVersion': [0, int, 0, 1, 1], 'classes': [0, dict, 0, 1, {}], 'objectVersion': [0, int, 0, 1, 46], 'rootObject': [0, PBXProject, 1, 1], }) def ComputeIDs(self, recursive=True, overwrite=True, hash=None): # Although XCProjectFile is implemented here as an XCObject, it's not a # proper object in the Xcode sense, and it certainly doesn't have its own # ID. Pass through an attempt to update IDs to the real root object. if recursive: self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash) def Print(self, file=sys.stdout): self.VerifyHasRequiredProperties() # Add the special "objects" property, which will be caught and handled # separately during printing. This structure allows a fairly standard # loop do the normal printing. 
self._properties['objects'] = {} self._XCPrint(file, 0, '// !$*UTF8*$!\n') if self._should_print_single_line: self._XCPrint(file, 0, '{ ') else: self._XCPrint(file, 0, '{\n') for property, value in sorted(self._properties.iteritems(), cmp=lambda x, y: cmp(x, y)): if property == 'objects': self._PrintObjects(file) else: self._XCKVPrint(file, 1, property, value) self._XCPrint(file, 0, '}\n') del self._properties['objects'] def _PrintObjects(self, file): if self._should_print_single_line: self._XCPrint(file, 0, 'objects = {') else: self._XCPrint(file, 1, 'objects = {\n') objects_by_class = {} for object in self.Descendants(): if object == self: continue class_name = object.__class__.__name__ if not class_name in objects_by_class: objects_by_class[class_name] = [] objects_by_class[class_name].append(object) for class_name in sorted(objects_by_class): self._XCPrint(file, 0, '\n') self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n') for object in sorted(objects_by_class[class_name], cmp=lambda x, y: cmp(x.id, y.id)): object.Print(file) self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n') if self._should_print_single_line: self._XCPrint(file, 0, '}; ') else: self._XCPrint(file, 1, '};\n')
mit
HeinleinSupport/check_mk
python-pbr/lib/python/pbr/tests/test_util.py
10
2909
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. (HP) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import textwrap import six from six.moves import configparser import sys from pbr.tests import base from pbr import util class TestExtrasRequireParsingScenarios(base.BaseTestCase): scenarios = [ ('simple_extras', { 'config_text': """ [extras] first = foo bar==1.0 second = baz>=3.2 foo """, 'expected_extra_requires': { 'first': ['foo', 'bar==1.0'], 'second': ['baz>=3.2', 'foo'], 'test': ['requests-mock'], "test:(python_version=='2.6')": ['ordereddict'], } }), ('with_markers', { 'config_text': """ [extras] test = foo:python_version=='2.6' bar baz<1.6 :python_version=='2.6' zaz :python_version>'1.0' """, 'expected_extra_requires': { "test:(python_version=='2.6')": ['foo', 'baz<1.6'], "test": ['bar', 'zaz']}}), ('no_extras', { 'config_text': """ [metadata] long_description = foo """, 'expected_extra_requires': {} })] def config_from_ini(self, ini): config = {} if sys.version_info >= (3, 2): parser = configparser.ConfigParser() else: parser = configparser.SafeConfigParser() ini = textwrap.dedent(six.u(ini)) parser.readfp(io.StringIO(ini)) for section in parser.sections(): config[section] = dict(parser.items(section)) return config def test_extras_parsing(self): config = self.config_from_ini(self.config_text) kwargs = util.setup_cfg_to_setup_kwargs(config) self.assertEqual(self.expected_extra_requires, kwargs['extras_require']) class 
TestInvalidMarkers(base.BaseTestCase): def test_invalid_marker_raises_error(self): config = {'extras': {'test': "foo :bad_marker>'1.0'"}} self.assertRaises(SyntaxError, util.setup_cfg_to_setup_kwargs, config)
gpl-2.0
code-for-india/sahana_shelter_worldbank
private/templates/EUROSHA/controllers.py
1
13226
# -*- coding: utf-8 -*- from os import path from gluon import * from gluon.storage import Storage from s3 import * # ============================================================================= class index(): """ Custom Home Page """ def __call__(self): request = current.request response = current.response response.title = current.deployment_settings.get_system_name() T = current.T db = current.db auth = current.auth s3db = current.s3db s3 = response.s3 appname = request.application settings = current.deployment_settings has_module = settings.has_module if has_module("cr"): table = s3db.cr_shelter SHELTERS = s3.crud_strings["cr_shelter"].title_list else: SHELTERS = "" # Menu Boxes menu_btns = [#div, label, app, function #["col1", T("Staff"), "hrm", "staff"], #["col1", T("Volunteers"), "vol", "volunteer"], ["col1", T("Projects"), "project", "project"], ["col1", T("Vehicles"), "vehicle", "vehicle"], ["col2", T("Assets"), "asset", "asset"], ["col2", T("Inventory Items"), "inv", "inv_item"], #["facility", T("Facilities"), "org", "facility"], ["facility", T("Hospitals"), "hms", "hospital"], ["facility", T("Offices"), "org", "office"], ["facility", SHELTERS, "cr", "shelter"], ["facility", T("Transport"), "transport", "index"], ["facility", T("Warehouses"), "inv", "warehouse"], ] menu_divs = {"col1": DIV(_id="menu_div_col1", _class="menu_div"), "col2": DIV(_id="menu_div_col2", _class="menu_div"), "facility": DIV(H3(T("Facilities")), _id = "facility_box", _class = "menu_box"), } for div, label, app, function in menu_btns: if has_module(app): # @ToDo: Also check permissions (e.g. 
for anonymous users) menu_divs[div].append(A(DIV(label, _class="menu-btn-r"), _class="menu-btn-l", _href = URL(app, function) ) ) cols_box = DIV(H3(T("Humanitarian Projects")), DIV(_id="menu_div_col0"), menu_divs["col1"], menu_divs["col2"], _id="cols_box", #_class="menu_box fleft swidth" _class="menu_box" ) facility_box = menu_divs["facility"] facility_box.append(A(IMG(_src="/%s/static/img/map_icon_128.png" % \ appname), _href = URL(c="gis", f="index"), _title = T("Map") ) ) datatable_ajax_source = "" # Check logged in AND permissions roles = current.session.s3.roles system_roles = auth.get_system_roles() AUTHENTICATED = system_roles.AUTHENTICATED table = s3db.org_organisation has_permission = auth.s3_has_permission if AUTHENTICATED in roles and \ has_permission("read", table): org_items = self.organisation() datatable_ajax_source = "/%s/default/organisation.aadata" % \ appname s3.actions = None permit = auth.permission permit.controller = "org" permit.function = "site" permitted_facilities = auth.permitted_facilities(redirect_on_error=False) if permitted_facilities: facilities = s3db.org_SiteRepresent().bulk(permitted_facilities) facility_list = [(fac, facilities[fac]) for fac in facilities] facility_list = sorted(facility_list, key=lambda fac: fac[1]) facility_opts = [OPTION(fac[1], _value=fac[0]) for fac in facility_list] manage_facility_box = DIV(H3(T("Manage Your Facilities")), SELECT(_id = "manage_facility_select", _style = "max-width:400px;", *facility_opts ), A(T("Go"), _href = URL(c="default", f="site", args=[facility_list[0][0]]), #_disabled = "disabled", _id = "manage_facility_btn", _class = "action-btn" ), _id = "manage_facility_box", _class = "menu_box fleft" ) s3.jquery_ready.append( '''$('#manage_facility_select').change(function(){ $('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val())) })''') else: manage_facility_box = "" if has_permission("create", table): create = A(T("Create Organization"), _href 
= URL(c="org", f="organisation", args=["create"]), _id = "add-btn", _class = "action-btn", _style = "margin-right: 10px;") else: create = "" org_box = DIV(H3(T("Organizations")), create, org_items, _id = "org_box", _class = "menu_box fleft" ) else: manage_facility_box = "" org_box = "" # Login/Registration forms self_registration = settings.get_security_self_registration() registered = False login_form = None login_div = None register_form = None register_div = None if AUTHENTICATED not in roles: # This user isn't yet logged-in if request.cookies.has_key("registered"): # This browser has logged-in before registered = True if self_registration: # Provide a Registration box on front page register_form = auth.s3_registration_form() register_div = DIV(H3(T("Register")), P(XML(T("If you would like to help, then please %(sign_up_now)s") % \ dict(sign_up_now=B(T("sign-up now")))))) if request.env.request_method == "POST": post_script = \ '''$('#register_form').removeClass('hide') $('#login_form').addClass('hide')''' else: post_script = "" register_script = \ '''$('#register-btn').attr('href','#register') $('#login-btn').attr('href','#login') %s $('#register-btn').click(function(){ $('#register_form').removeClass('hide') $('#login_form').addClass('hide') }) $('#login-btn').click(function(){ $('#register_form').addClass('hide') $('#login_form').removeClass('hide') })''' % post_script s3.jquery_ready.append(register_script) # Provide a login box on front page request.args = ["login"] auth.messages.submit_button = T("Login") login_form = auth() login_div = DIV(H3(T("Login")), P(XML(T("Registered users can %(login)s to access the system") % \ dict(login=B(T("login")))))) if settings.frontpage.rss: s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css") s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard") s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js") counter = 0 
feeds = "" for feed in settings.frontpage.rss: counter += 1 feeds = "".join((feeds, "{title:'%s',\n" % feed["title"], "url:'%s'}" % feed["url"])) # Don't add a trailing comma for old IEs if counter != len(settings.frontpage.rss): feeds += ",\n" # feedCycleTime: milliseconds before feed is reloaded (5 minutes) feed_control = "".join((''' function LoadDynamicFeedControl(){ var feeds=[ ''', feeds, ''' ] var options={ feedCycleTime:300000, numResults:5, stacked:true, horizontal:false, title:"''', str(T("News")), '''" } new GFdynamicFeedControl(feeds,'feed-control',options) } google.load('feeds','1') google.setOnLoadCallback(LoadDynamicFeedControl)''')) s3.js_global.append(feed_control) view = path.join(request.folder, "private", "templates", "EUROSHA", "views", "index.html") try: # Pass view as file not str to work in compiled mode response.view = open(view, "rb") except IOError: from gluon.http import HTTP raise HTTP(404, "Unable to open Custom View: %s" % view) return dict(title = response.title, cols_box = cols_box, facility_box = facility_box, manage_facility_box = manage_facility_box, org_box = org_box, r = None, # Required for dataTable to work datatable_ajax_source = datatable_ajax_source, self_registration=self_registration, registered=registered, login_form=login_form, login_div=login_div, register_form=register_form, register_div=register_div ) # ------------------------------------------------------------------------- @staticmethod def organisation(): """ Function to handle pagination for the org list on the homepage """ request = current.request get_vars = request.get_vars resource = current.s3db.resource("org_organisation") totalrows = resource.count() if "iDisplayLength" in get_vars: display_length = int(get_vars["iDisplayLength"]) else: display_length = 10 limit = 4 * display_length list_fields = ["id", "name"] filter, orderby, left = resource.datatable_filter(list_fields, get_vars) resource.add_filter(filter) data = resource.select(list_fields, start=0, 
limit=limit, orderby=orderby, left=left, count=True, represent=True) filteredrows = data["numrows"] rfields = data["rfields"] rows = data["rows"] dt = S3DataTable(rfields, rows) dt_id = "org_dt" if request.extension == "html": dt.defaultActionButtons(resource) current.response.s3.no_formats = True items = dt.html(totalrows, filteredrows, dt_id, dt_displayLength=display_length, dt_ajax_url=URL(c="default", f="organisation", extension="aadata", vars={"id": dt_id}, ), dt_pagination="true", ) elif request.extension == "aadata": if "sEcho" in get_vars: echo = int(get_vars.sEcho) else: echo = None items = dt.json(totalrows, filteredrows, dt_id, echo) current.response.headers["Content-Type"] = "application/json" else: from gluon.http import HTTP raise HTTP(501, current.ERROR.BAD_FORMAT) return items # END =========================================================================
mit
fengbaicanhe/intellij-community
python/lib/Lib/encodings/mac_iceland.py
593
13754
""" Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='mac-iceland', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> CONTROL CHARACTER u'\x01' # 0x01 -> CONTROL CHARACTER u'\x02' # 0x02 -> CONTROL CHARACTER u'\x03' # 0x03 -> CONTROL CHARACTER u'\x04' # 0x04 -> CONTROL CHARACTER u'\x05' # 0x05 -> CONTROL CHARACTER u'\x06' # 0x06 -> CONTROL CHARACTER u'\x07' # 0x07 -> CONTROL CHARACTER u'\x08' # 0x08 -> CONTROL CHARACTER u'\t' # 0x09 -> CONTROL CHARACTER u'\n' # 0x0A -> CONTROL CHARACTER u'\x0b' # 0x0B -> CONTROL CHARACTER u'\x0c' # 0x0C -> CONTROL CHARACTER u'\r' # 0x0D -> CONTROL CHARACTER u'\x0e' # 0x0E -> CONTROL CHARACTER u'\x0f' # 0x0F -> CONTROL CHARACTER u'\x10' # 0x10 -> CONTROL CHARACTER u'\x11' # 0x11 -> CONTROL CHARACTER u'\x12' # 0x12 -> CONTROL CHARACTER u'\x13' # 0x13 -> CONTROL CHARACTER u'\x14' # 0x14 -> CONTROL CHARACTER u'\x15' # 0x15 -> CONTROL CHARACTER u'\x16' # 0x16 -> CONTROL CHARACTER u'\x17' # 0x17 -> CONTROL CHARACTER u'\x18' # 0x18 -> CONTROL CHARACTER 
u'\x19' # 0x19 -> CONTROL CHARACTER u'\x1a' # 0x1A -> CONTROL CHARACTER u'\x1b' # 0x1B -> CONTROL CHARACTER u'\x1c' # 0x1C -> CONTROL CHARACTER u'\x1d' # 0x1D -> CONTROL CHARACTER u'\x1e' # 0x1E -> CONTROL CHARACTER u'\x1f' # 0x1F -> CONTROL CHARACTER u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' 
# 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> 
LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> CONTROL CHARACTER u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH 
DIAERESIS u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE u'\xb0' # 0xA1 -> DEGREE SIGN u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND SIGN u'\xa7' # 0xA4 -> SECTION SIGN u'\u2022' # 0xA5 -> BULLET u'\xb6' # 0xA6 -> PILCROW SIGN u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S u'\xae' # 0xA8 -> REGISTERED SIGN u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u2122' # 0xAA -> TRADE MARK SIGN u'\xb4' # 0xAB -> ACUTE ACCENT u'\xa8' # 0xAC -> DIAERESIS u'\u2260' # 0xAD -> NOT EQUAL TO u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE u'\u221e' # 0xB0 -> INFINITY u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO u'\xa5' # 0xB4 -> YEN SIGN u'\xb5' # 0xB5 -> MICRO SIGN u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL u'\u2211' # 0xB7 -> N-ARY SUMMATION u'\u220f' # 0xB8 -> N-ARY PRODUCT u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI u'\u222b' # 0xBA -> INTEGRAL u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA u'\xe6' # 0xBE -> LATIN SMALL LETTER AE u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE u'\xbf' # 0xC0 -> INVERTED QUESTION MARK u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK u'\xac' # 0xC2 -> NOT SIGN u'\u221a' # 0xC3 -> SQUARE ROOT u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK u'\u2248' # 0xC5 -> ALMOST EQUAL TO u'\u2206' # 0xC6 -> INCREMENT u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS u'\xa0' # 0xCA -> NO-BREAK SPACE u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE u'\u2013' # 0xD0 -> EN DASH u'\u2014' # 0xD1 -> EM DASH u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK 
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK u'\xf7' # 0xD6 -> DIVISION SIGN u'\u25ca' # 0xD7 -> LOZENGE u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS u'\u2044' # 0xDA -> FRACTION SLASH u'\u20ac' # 0xDB -> EURO SIGN u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE u'\xb7' # 0xE1 -> MIDDLE DOT u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK u'\u2030' # 0xE4 -> PER MILLE SIGN u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\uf8ff' # 0xF0 -> Apple logo u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT u'\u02dc' # 0xF7 -> SMALL TILDE u'\xaf' # 0xF8 -> MACRON u'\u02d8' # 0xF9 -> BREVE u'\u02d9' # 0xFA -> DOT ABOVE u'\u02da' # 0xFB -> RING ABOVE u'\xb8' # 0xFC -> CEDILLA u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT u'\u02db' # 0xFE -> OGONEK u'\u02c7' # 0xFF -> CARON ) ### Encoding table 
encoding_table=codecs.charmap_build(decoding_table)
apache-2.0
MechanisM/musicdb
contrib/debug_toolbar/utils/sqlparse/lexer.py
18
11470
# -*- coding: utf-8 -*- # Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com # # This module is part of python-sqlparse and is released under # the BSD License: http://www.opensource.org/licenses/bsd-license.php. """SQL Lexer""" # This code is based on the SqlLexer in pygments. # http://pygments.org/ # It's separated from the rest of pygments to increase performance # and to allow some customizations. import re from debug_toolbar.utils.sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON from debug_toolbar.utils.sqlparse.tokens import * from debug_toolbar.utils.sqlparse.tokens import _TokenType class include(str): pass class combined(tuple): """Indicates a state combined from multiple states.""" def __new__(cls, *args): return tuple.__new__(cls, args) def __init__(self, *args): # tuple.__init__ doesn't do anything pass def is_keyword(value): test = value.upper() return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, Name)), value def apply_filters(stream, filters, lexer=None): """ Use this method to apply an iterable of filters to a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`. """ def _apply(filter_, stream): for token in filter_.filter(lexer, stream): yield token for filter_ in filters: stream = _apply(filter_, stream) return stream class LexerMeta(type): """ Metaclass for Lexer, creates the self._tokens attribute from self.tokens on the first instantiation. 
""" def _process_state(cls, unprocessed, processed, state): assert type(state) is str, "wrong state name %r" % state assert state[0] != '#', "invalid state name %r" % state if state in processed: return processed[state] tokens = processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state tokens.extend(cls._process_state(unprocessed, processed, str(tdef))) continue assert type(tdef) is tuple, "wrong rule def %r" % tdef try: rex = re.compile(tdef[0], rflags).match except Exception, err: raise ValueError("uncompilable regex %r in state %r of %r: %s" % (tdef[0], state, cls, err)) assert type(tdef[1]) is _TokenType or callable(tdef[1]), \ 'token type must be simple type or callable, not %r' % (tdef[1],) if len(tdef) == 2: new_state = None else: tdef2 = tdef[2] if isinstance(tdef2, str): # an existing state if tdef2 == '#pop': new_state = -1 elif tdef2 in unprocessed: new_state = (tdef2,) elif tdef2 == '#push': new_state = tdef2 elif tdef2[:5] == '#pop:': new_state = -int(tdef2[5:]) else: assert False, 'unknown new state %r' % tdef2 elif isinstance(tdef2, combined): # combine a new state from existing ones new_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in tdef2: assert istate != state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[new_state] = itokens new_state = (new_state,) elif isinstance(tdef2, tuple): # push more than one state for state in tdef2: assert (state in unprocessed or state in ('#pop', '#push')), \ 'unknown new state ' + state new_state = tdef2 else: assert False, 'unknown new state def %r' % tdef2 tokens.append((rex, tdef[1], new_state)) return tokens def process_tokendef(cls): cls._all_tokens = {} cls._tmpname = 0 processed = cls._all_tokens[cls.__name__] = {} #tokendefs = tokendefs or cls.tokens[name] for state in cls.tokens.keys(): 
cls._process_state(cls.tokens, processed, state) return processed def __call__(cls, *args, **kwds): if not hasattr(cls, '_tokens'): cls._all_tokens = {} cls._tmpname = 0 if hasattr(cls, 'token_variants') and cls.token_variants: # don't process yet pass else: cls._tokens = cls.process_tokendef() return type.__call__(cls, *args, **kwds) class Lexer: __metaclass__ = LexerMeta encoding = 'utf-8' stripall = False stripnl = False tabsize = 0 flags = re.IGNORECASE tokens = { 'root': [ (r'--.*?(\r|\n|\r\n)', Comment.Single), (r'(\r|\n|\r\n)', Newline), (r'\s+', Whitespace), (r'/\*', Comment.Multiline, 'multiline-comments'), (r':=', Assignment), (r'::', Punctuation), (r'[*]', Wildcard), (r"`(``|[^`])*`", Name), (r"´(´´|[^´])*´", Name), (r'@[a-zA-Z_][a-zA-Z0-9_]+', Name), (r'[+/<>=~!@#%^&|`?^-]', Operator), (r'[0-9]+', Number.Integer), # TODO: Backslash escapes? (r"'(''|[^'])*'", String.Single), (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN', Keyword), (r'END( IF| LOOP)?', Keyword), (r'CREATE( OR REPLACE)?', Keyword.DDL), (r'[a-zA-Z_][a-zA-Z0-9_]*', is_keyword), (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', Name.Builtin), (r'[;:()\[\],\.]', Punctuation), ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), (r'[^/\*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } def __init__(self): self.filters = [] def add_filter(self, filter_, **options): from sqlparse.filters import Filter if not isinstance(filter_, Filter): filter_ = filter_(**options) self.filters.append(filter_) def get_tokens(self, text, unfiltered=False): """ Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters. 
""" if not isinstance(text, unicode): if self.encoding == 'guess': try: text = text.decode('utf-8') if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] except UnicodeDecodeError: text = text.decode('latin1') elif self.encoding == 'chardet': try: import chardet except ImportError: raise ImportError('To enable chardet encoding guessing, ' 'please install the chardet library ' 'from http://chardet.feedparser.org/') enc = chardet.detect(text) text = text.decode(enc['encoding']) else: text = text.decode(self.encoding) if self.stripall: text = text.strip() elif self.stripnl: text = text.strip('\n') if self.tabsize > 0: text = text.expandtabs(self.tabsize) # if not text.endswith('\n'): # text += '\n' def streamer(): for i, t, v in self.get_tokens_unprocessed(text): yield t, v stream = streamer() if not unfiltered: stream = apply_filters(stream, self.filters, self) return stream def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. ``stack`` is the inital stack (default: ``['root']``) """ pos = 0 tokendefs = self._tokens statestack = list(stack) statetokens = tokendefs[statestack[-1]] known_names = {} while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, pos) if m: # print rex.pattern value = m.group() if value in known_names: yield pos, known_names[value], value elif type(action) is _TokenType: yield pos, action, value elif hasattr(action, '__call__'): ttype, value = action(value) known_names[value] = ttype yield pos, ttype, value else: for item in action(self, m): yield item pos = m.end() if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': statestack.pop() elif state == '#push': statestack.append(statestack[-1]) else: statestack.append(state) elif isinstance(new_state, int): # pop del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: assert False, "wrong state def: %r" % new_state 
statetokens = tokendefs[statestack[-1]] break else: try: if text[pos] == '\n': # at EOL, reset state to "root" pos += 1 statestack = ['root'] statetokens = tokendefs['root'] yield pos, Text, u'\n' continue yield pos, Error, text[pos] pos += 1 except IndexError: break def tokenize(sql): """Tokenize sql. Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream of ``(token type, value)`` items. """ lexer = Lexer() return lexer.get_tokens(sql)
agpl-3.0
mcking49/apache-flask
Python/Lib/idlelib/configSectionNameDialog.py
37
3979
""" Dialog that allows user to specify a new config file section name. Used to get new highlight theme and keybinding set names. The 'return value' for the dialog, used two placed in configDialog.py, is the .result attribute set in the Ok and Cancel methods. """ from Tkinter import * import tkMessageBox class GetCfgSectionNameDialog(Toplevel): def __init__(self, parent, title, message, used_names, _htest=False): """ message - string, informational message to display used_names - string collection, names already in use for validity check _htest - bool, change box location when running htest """ Toplevel.__init__(self, parent) self.configure(borderwidth=5) self.resizable(height=FALSE, width=FALSE) self.title(title) self.transient(parent) self.grab_set() self.protocol("WM_DELETE_WINDOW", self.Cancel) self.parent = parent self.message = message self.used_names = used_names self.create_widgets() self.withdraw() #hide while setting geometry self.update_idletasks() #needs to be done here so that the winfo_reqwidth is valid self.messageInfo.config(width=self.frameMain.winfo_reqwidth()) self.geometry( "+%d+%d" % ( parent.winfo_rootx() + (parent.winfo_width()/2 - self.winfo_reqwidth()/2), parent.winfo_rooty() + ((parent.winfo_height()/2 - self.winfo_reqheight()/2) if not _htest else 100) ) ) #centre dialog over parent (or below htest box) self.deiconify() #geometry set, unhide self.wait_window() def create_widgets(self): self.name = StringVar(self.parent) self.fontSize = StringVar(self.parent) self.frameMain = Frame(self, borderwidth=2, relief=SUNKEN) self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH) self.messageInfo = Message(self.frameMain, anchor=W, justify=LEFT, padx=5, pady=5, text=self.message) #,aspect=200) entryName = Entry(self.frameMain, textvariable=self.name, width=30) entryName.focus_set() self.messageInfo.pack(padx=5, pady=5) #, expand=TRUE, fill=BOTH) entryName.pack(padx=5, pady=5) frameButtons = Frame(self, pady=2) frameButtons.pack(side=BOTTOM) 
self.buttonOk = Button(frameButtons, text='Ok', width=8, command=self.Ok) self.buttonOk.pack(side=LEFT, padx=5) self.buttonCancel = Button(frameButtons, text='Cancel', width=8, command=self.Cancel) self.buttonCancel.pack(side=RIGHT, padx=5) def name_ok(self): ''' After stripping entered name, check that it is a sensible ConfigParser file section name. Return it if it is, '' if not. ''' name = self.name.get().strip() if not name: #no name specified tkMessageBox.showerror(title='Name Error', message='No name specified.', parent=self) elif len(name)>30: #name too long tkMessageBox.showerror(title='Name Error', message='Name too long. It should be no more than '+ '30 characters.', parent=self) name = '' elif name in self.used_names: tkMessageBox.showerror(title='Name Error', message='This name is already in use.', parent=self) name = '' return name def Ok(self, event=None): name = self.name_ok() if name: self.result = name self.destroy() def Cancel(self, event=None): self.result = '' self.destroy() if __name__ == '__main__': import unittest unittest.main('idlelib.idle_test.test_config_name', verbosity=2, exit=False) from idlelib.idle_test.htest import run run(GetCfgSectionNameDialog)
mit
albertoferna/compmech
compmech/conecyl/imperfections/imperfections.py
3
6978
import os
from random import sample

import numpy as np
from numpy import cos
from scipy.linalg import lstsq

from compmech.constants import CMHOME
from compmech.logger import *


def load_c0(name, funcnum, m0, n0):
    """Load a pre-computed coefficient file from the ``c0`` data folder.

    Parameters
    ----------
    name : str
        Base name of the imperfection data the coefficients belong to.
    funcnum : int
        Approximation function that was used (see :func:`calc_c0`).
    m0 : int
        Number of terms along the meridian.
    n0 : int
        Number of terms along the circumference.

    Returns
    -------
    c0 : numpy.ndarray
        The coefficients read from disk.

    Raises
    ------
    ValueError
        If no coefficient file exists for the given parameters.
    """
    path = os.path.join(CMHOME, 'conecyl', 'imperfections', 'c0',
            'c0_{0}_f{1}_m{2:03d}_n{3:03d}.txt'.format(
            name, funcnum, m0, n0))
    if os.path.isfile(path):
        return np.loadtxt(path)
    else:
        raise ValueError('Coefficient file not found!')


def calc_c0(path, m0=40, n0=40, funcnum=2, sample_size=None, maxmem=8,
            save=True, offset_w0=None):
    r"""Find the coefficients `c_0` that best fit the `w_0` function.

    The measured data will be fit using one of the following functions,
    selected using the ``funcnum`` parameter:

    ``funcnum=1``: sums of `sin(b_x) sin(b_\theta)` and
    `sin(b_x) cos(b_\theta)` terms (`i` starting at 1).

    ``funcnum=2`` (default): sums of `cos(b_x) sin(b_\theta)` and
    `cos(b_x) cos(b_\theta)` terms (`i` starting at 0).

    ``funcnum=3``: all four products of `sin/cos(b_x)` with
    `sin/cos(b_\theta)` (`i` starting at 0).

    where `b_x = i \pi x / L_{points}` and `b_\theta = j \theta`, with
    `L_{points}` the meridional length covered by the measured points, so
    that `x/L_{points}` varies from `0.` (top) to `1.` (bottom).

    .. note:: If the measured sample does not cover all the height,
              **it will be stretched**.

    The approximation is written in matrix form as `w_0 = [g] \{c\}`,
    where `[g]` carries the base functions and `\{c\}` the respective
    amplitudes; the best `\{c\}` minimizes the least-square error against
    the measured imperfection pattern.

    Parameters
    ----------
    path : str or numpy.ndarray
        The path of the file containing the data. The input file must
        have 3 columns: `\theta`, `height`, `imp`; expressed in Cartesian
        coordinates. This input can also be a ``numpy.ndarray`` object,
        with the same three columns.
    m0 : int
        Number of terms along the meridian (`x`).
    n0 : int
        Number of terms along the circumference (`\theta`).
    funcnum : int, optional
        As explained above, selects the base functions used for
        the approximation.
    sample_size : int or None, optional
        Specifies how many points of the imperfection file should be used.
        If ``None`` all points will be used in the computations.
    maxmem : int, optional
        Maximum RAM memory in GB allowed to compute the base functions.
        The ``scipy.linalg.lstsq`` will go beyond this limit.
    save : bool, optional
        If ``True`` saves the calculated coefficients in the
        ``compmech/conecyl/imperfections/c0`` folder.
    offset_w0 : float or None, optional
        Constant offset added to every measured imperfection value
        before fitting.

    Returns
    -------
    (c0, residues) : tuple
        ``c0`` is a 1-D array with the best-fit coefficients and
        ``residues`` the residues returned by ``scipy.linalg.lstsq``.

    Raises
    ------
    ValueError
        If the input does not have three columns or ``funcnum`` is not
        1, 2 or 3.
    """
    import mgi
    if isinstance(path, np.ndarray):
        input_pts = path
        path = 'unnamed.txt'
    else:
        input_pts = np.loadtxt(path)
    if input_pts.shape[1] != 3:
        raise ValueError('Input does not have the format: "theta, x, imp"')
    log('Finding w0 coefficients for {0},\n\tusing funcnum {1}'.format(
        str(os.path.basename(path)), funcnum))
    if sample_size:
        num = input_pts.shape[0]
        if sample_size < num:
            input_pts = input_pts[sample(range(num), int(sample_size))]
    if funcnum == 1:
        size = 2
    elif funcnum == 2:
        size = 2
    elif funcnum == 3:
        size = 4
    else:
        raise ValueError('Valid values for "funcnum" are 1, 2 or 3')
    # Cap the number of points so that the dense base-function matrix
    # (num x size*m0*n0 doubles) stays within the "maxmem" GB budget.
    maxnum = maxmem*1024*1024*1024*8/(64*size*m0*n0)
    num = input_pts.shape[0]
    if num >= maxnum:
        input_pts = input_pts[sample(range(num), int(maxnum))]
        # BUG FIX: .format() was previously applied only to the second
        # concatenated literal, so the "{0}"/"{1}" placeholders were
        # never substituted in the logged message.
        warn('Reducing sample size from {0} to {1} due to the "maxmem" '
             'specified'.format(num, maxnum), level=1)
    thetas = input_pts[:, 0].copy()
    xs = input_pts[:, 1]
    w0pts = input_pts[:, 2]
    if offset_w0:
        w0pts += offset_w0
    # normalizing x between 0. and 1.
    xs = (xs - xs.min())/(xs.max() - xs.min())
    # inverting x to cope with the coordsys of the semi-analytical model
    xs = 1 - xs
    a = mgi.fa(m0, n0, xs, thetas, funcnum=funcnum)
    log('Base functions calculated', level=1)
    try:
        c0, residues, rank, s = lstsq(a, w0pts)
    except MemoryError:
        error('Reduce the "maxmem" parameter!')
        # BUG FIX: previously execution fell through and crashed with a
        # NameError on the undefined "c0"; re-raise the real error.
        raise
    log('Finished scipy.linalg.lstsq', level=1)
    if save:
        name = '.'.join(os.path.basename(path).split('.')[0:-1])
        outpath = os.path.join(CMHOME, 'conecyl', 'imperfections', 'c0',
                'c0_{0}_f{1}_m{2:03d}_n{3:03d}.txt'.format(
                name, funcnum, m0, n0))
        np.savetxt(outpath, c0)
    return c0, residues


def fw0(m0, n0, c0, xs_norm, ts, funcnum=2):
    r"""Calculates the imperfection field `w_0` for a given input.

    Parameters
    ----------
    m0 : int
        The number of terms along the meridian.
    n0 : int
        The number of terms along the circumference.
    c0 : numpy.ndarray
        The coefficients of the imperfection pattern.
    xs_norm : numpy.ndarray
        The meridian coordinate (`x`) normalized to be between ``0.``
        and ``1.``.
    ts : numpy.ndarray
        The angles in radians representing the circumferential
        coordinate (`\theta`).
    funcnum : int, optional
        The function used for the approximation (see the ``calc_c0``
        function).

    Returns
    -------
    w0s : numpy.ndarray
        The imperfection field, with the same shape as ``xs_norm``.

    Notes
    -----
    The inputs ``xs_norm`` and ``ts`` must be of the same size.

    If ``funcnum==1 or funcnum==2`` then ``size=2``, if ``funcnum==3``
    then ``size=4`` and the inputs must satisfy
    ``c0.shape[0] == size*m0*n0``.
    """
    if xs_norm.shape != ts.shape:
        raise ValueError('xs_norm and ts must have the same shape')
    if funcnum == 1:
        size = 2
    elif funcnum == 2:
        size = 2
    elif funcnum == 3:
        size = 4
    else:
        # BUG FIX: an invalid funcnum previously left "size" unbound and
        # crashed with a NameError; raise the same error as calc_c0.
        raise ValueError('Valid values for "funcnum" are 1, 2 or 3')
    if c0.shape[0] != size*m0*n0:
        raise ValueError('Invalid c0 for the given m0 and n0!')
    import mgi
    w0s = mgi.fw0(m0, n0, c0, xs_norm.ravel(), ts.ravel(), funcnum)
    return w0s.reshape(xs_norm.shape)
bsd-3-clause
viur-framework/server
prototypes/hierarchy.py
1
35181
# -*- coding: utf-8 -*- import logging, sys from datetime import datetime from time import time from server import db, utils, errors, conf, request, securitykey from server import forcePost, forceSSL, exposed, internalExposed from server.bones import baseBone, keyBone, numericBone from server.prototypes import BasicApplication from server.skeleton import Skeleton from server.tasks import callDeferred class HierarchySkel(Skeleton): parententry = keyBone(descr="Parent", visible=False, indexed=True, readOnly=True) parentrepo = keyBone(descr="BaseRepo", visible=False, indexed=True, readOnly=True) sortindex = numericBone(descr="SortIndex", mode="float", visible=False, indexed=True, readOnly=True, max=sys.maxint) def preProcessSerializedData(self, dbfields): if not ("sortindex" in dbfields and dbfields["sortindex"]): dbfields["sortindex"] = time() return dbfields class Hierarchy(BasicApplication): """ Hierarchy is a ViUR BasicApplication. It is used for multiple data entities of the same kind, which are stored in an hierarchical order. Every element of the hierarchy can be a child of another element or may contain more children. It needs to be sub-classed for individual modules. :ivar kindName: Name of the kind of data entities that are managed by the application. \ This information is used to bind a specific :class:`server.skeleton.Skeleton`-class to the \ application. For more information, refer to the function :func:`_resolveSkel`. :vartype kindName: str :ivar adminInfo: todo short info on how to use adminInfo. 
:vartype adminInfo: dict | callable """ accessRights = ["add", "edit", "view", "delete"] # Possible access rights for this app def adminInfo(self): return { "name": self.__class__.__name__, # Module name as shown in the admin tools "handler": "hierarchy", # Which handler to invoke "icon": "icons/modules/hierarchy.svg" # Icon for this module } def __init__(self, moduleName, modulePath, *args, **kwargs): super(Hierarchy, self).__init__(moduleName, modulePath, *args, **kwargs) def viewSkel(self, *args, **kwargs): """ Retrieve a new instance of a :class:`server.skeleton.Skeleton` that is used by the application for viewing an existing entry from the hierarchy. The default is a Skeleton instance returned by :func:`_resolveSkel`. .. seealso:: :func:`addSkel`, :func:`editSkel`, :func:`_resolveSkel` :return: Returns a Skeleton instance for viewing an entry. :rtype: server.skeleton.Skeleton """ return self._resolveSkelCls(*args, **kwargs)() def addSkel(self, *args, **kwargs): """ Retrieve a new instance of a :class:`server.skeleton.Skeleton` that is used by the application for adding an entry to the hierarchy. The default is a Skeleton instance returned by :func:`_resolveSkel`. .. seealso:: :func:`viewSkel`, :func:`editSkel`, :func:`_resolveSkel` :return: Returns a Skeleton instance for adding an entry. :rtype: server.skeleton.Skeleton """ return self._resolveSkelCls(*args, **kwargs)() def editSkel(self, *args, **kwargs): """ Retrieve a new instance of a :class:`server.skeleton.Skeleton` that is used by the application for editing an existing entry from the hierarchy. The default is a Skeleton instance returned by :func:`_resolveSkel`. .. seealso:: :func:`viewSkel`, :func:`editSkel`, :func:`_resolveSkel` :return: Returns a Skeleton instance for editing an entry. :rtype: server.skeleton.Skeleton """ return self._resolveSkelCls(*args, **kwargs)() def getRootNode(self, entryKey): """ Returns the root-node for a given child. 
:param entryKey: URL-Safe key of the child entry :type entryKey: str :returns: The entity of the root-node. :rtype: :class:`server.db.Entity` """ repo = db.Get(entryKey) while repo and "parententry" in repo: repo = db.Get(repo["parententry"]) assert repo and repo.key().kind() == self.viewSkel().kindName + "_rootNode" return repo def isValidParent(self, parent): """ Checks wherever a given parent is valid. :param parent: Parent to test :type parent: str :returns: Test result. :rtype: bool """ if self.viewSkel().fromDB(parent): # Its a normal node return True try: assert self.getRootNode(parent) return True # Its a rootNode :) except: pass return False def ensureOwnUserRootNode(self): """ Ensures, that an root-node for the current user exists. If no root-node exists yet, it will be created. :returns: The entity of the root-node or None, if this was request was made by a guest. :rtype: :class:`server.db.Entity` """ thisuser = conf["viur.mainApp"].user.getCurrentUser() if thisuser: key = "rep_user_%s" % str(thisuser["key"]) kindName = self.viewSkel().kindName + "_rootNode" return db.GetOrInsert(key, kindName=kindName, creationdate=datetime.now(), rootNode=1, user=str(thisuser["key"])) return None def ensureOwnModuleRootNode(self): """ Ensures, that general root-node for the current module exists. If no root-node exists yet, it will be created. :returns: The entity of the root-node. :rtype: :class:`server.db.Entity` """ key = "rep_module_repo" kindName = self.viewSkel().kindName + "_rootNode" return db.GetOrInsert(key, kindName=kindName, creationdate=datetime.now(), rootNode=1) def isOwnUserRootNode(self, repo): """ Checks, if the given rootNode is owned by the current user. :param repo: URL-safe key of the root-node. :type repo: str :returns: True if the user owns this root-node, False otherwise. 
:rtype: bool """ thisuser = utils.getCurrentUser() if not thisuser: return False repo = self.getRootNode(repo) user_repo = self.ensureOwnUserRootNode() if str(repo.key.urlsafe()) == user_repo.key.urlsafe(): return True return False def getAvailableRootNodes(self, *args, **kwargs): """ Default function for providing a list of root node items. This list is requested by several module-internal functions and *must* be overridden by a custom functionality. The default stub for this function returns an empty list. An example implementation could be the following: .. code-block:: python def getAvailableRootNodes(self, *args, **kwargs): q = db.Query(self.rootKindName) ret = [{"key": str(e.key()), "name": e.get("name", str(e.key().id_or_name()))} for e in q.run(limit=25)] return ret :param args: Can be used in custom implementations. :param kwargs: Can be used in custom implementations. :return: Returns a list of dicts which must provide a "key" and a "name" entry with \ respective information. :rtype: list of dict """ return [] @callDeferred def deleteRecursive(self, key): """ Recursively processes a delete request. This will delete all entries which are children of *key*, except *key* itself. :param key: URL-safe key of the node which children should be deleted. :type key: str :returns: The number of deleted objects. :rtype: int """ entrys = db.Query(self.viewSkel().kindName).filter("parententry", str(key)).run() for e in entrys: self.deleteRecursive(str(e.key())) vs = self.editSkel() vs.setValues(e) vs.delete() ## Internal exposed functions @internalExposed def pathToKey(self, key=None): """ Returns the recursively expanded path through the Hierarchy from the root-node to a requested node. :param key: URL-safe key of the destination entity. :type key: str :returns: An nested dictionary with information about all nodes in the path from root \ to the requested node. :rtype: dict """ def getName(obj): """ Tries to return a suitable name for the given object. 
""" if "name" in obj: return obj["name"] skel = self.viewSkel() if "name" in skel: nameBone = getattr(skel, "name") if (isinstance(nameBone, baseBone) and "languages" in dir(nameBone) and nameBone.languages): skel.setValues(obj) return unicode(skel["name"]) return None availableRepos = self.getAvailableRootNodes() if not key: try: key = availableRepos[0]["key"] except: raise errors.NotFound() keylist = [] else: if str(key).isdigit(): key = str(db.Key.from_path(self.viewSkel().kindName, long(key))) keylist = [key] if not self.canList(key): raise errors.Unauthorized() res = [] lastChildren = [] for x in range(0, 99): q = db.Query(self.viewSkel().kindName) q.filter("parententry =", str(key)) q.order("sortindex") entryObjs = q.run(100) lastChildren = res[:] res = [] for obj in entryObjs: if "parententry" in obj: parent = str(obj["parententry"]) else: parent = None r = { "name": getName(obj), "key": str(obj.key()), "parent": parent, "hrk": obj["hrk"] if "hrk" in obj else None, "active": (str(obj.key()) in keylist) } if r["active"]: r["children"] = lastChildren res.append(r) if key in [x["key"] for x in availableRepos]: break else: item = db.Get(str(key)) if item and "parententry" in item: keylist.append(key) key = item["parententry"] else: break return res ## External exposed functions @exposed def listRootNodes(self, *args, **kwargs): """ Renders a list of all available repositories for the current user using the modules default renderer. :returns: The rendered representation of the available root-nodes. :rtype: str """ return self.render.listRootNodes(self.getAvailableRootNodes(*args, **kwargs)) @exposed def preview(self, skey, *args, **kwargs): """ Renders data for an entry, without reading from the database. This function allows to preview an entry without writing it to the database. Any entity values are provided via *kwargs*. The function uses the viewTemplate of the application. :returns: The rendered representation of the the supplied data. 
""" if not self.canPreview(): raise errors.Unauthorized() if not securitykey.verify(skey): raise errors.PreconditionFailed() skel = self.viewSkel() skel.fromClient(kwargs) return self.render.view(skel) @forceSSL @forcePost @exposed def reparent(self, item, dest, skey, *args, **kwargs): """ Moves an entry *item* (and everything beneath it) to another parent-node *dest*. .. seealso:: :func:`canReparent` :param item: URL-safe key of the item which will be moved. :type item: str :param dest: URL-safe key of the new parent for this item. :type dest: str :returns: A rendered success result generated by the default renderer. :raises: :exc:`server.errors.NotFound`, when no entry with the given *id* was found. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. :raises: :exc:`server.errors.PreconditionFailed`, if the *skey* could not be verified. """ if not securitykey.validate(skey, acceptSessionKey=True): raise errors.PreconditionFailed() if not self.canReparent(item, dest): raise errors.Unauthorized() if not self.isValidParent(dest) or item == dest: raise errors.NotAcceptable() ## Test for recursion isValid = False currLevel = db.Get(dest) for x in range(0, 99): if str(currLevel.key()) == item: break if currLevel.key().kind() == self.viewSkel().kindName + "_rootNode": # We reached a rootNode isValid = True break currLevel = db.Get(currLevel["parententry"]) if not isValid: raise errors.NotAcceptable() ## Update entry fromItem = db.Get(item) fromItem["parententry"] = dest fromItem["parentrepo"] = str(self.getRootNode(dest).key()) db.Put(fromItem) skel = self.editSkel() assert skel.fromDB(item) self.onItemReparent(skel) self.onItemChanged(skel) return self.render.reparentSuccess(obj=fromItem) @forceSSL @forcePost @exposed def setIndex(self, item, index, skey, *args, **kwargs): """ Changes the order of the elements in the current level by changing the index of *item*. .. 
seealso:: :func:`canSetIndex` :param item: URL-safe key of the item which index should be changed. :type item: str :param index: New index for this item. This value must be cast-able to float. :type index: str :returns: A rendered success result generated by the default renderer. :raises: :exc:`server.errors.NotFound`, when no entry with the given *key* was found. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. :raises: :exc:`server.errors.PreconditionFailed`, if the *skey* could not be verified. """ if not securitykey.validate(skey, acceptSessionKey=True): raise errors.PreconditionFailed() if not self.canSetIndex(item, index): raise errors.Unauthorized() fromItem = db.Get(item) fromItem["sortindex"] = float(index) db.Put(fromItem) skel = self.editSkel() assert skel.fromDB(item) self.onItemSetIndex(skel) self.onItemChanged(skel) return self.render.setIndexSuccess(obj=fromItem) @forceSSL @forcePost @exposed def delete(self, key, skey, *args, **kwargs): """ Delete an entry and all its children. The function runs several access control checks on the data before it is deleted. .. seealso:: :func:`canDelete`, :func:`editSkel`, :func:`onItemDeleted` :param key: URL-safe key of the entry to be deleted. :type key: str :returns: The rendered, deleted object of the entry. :raises: :exc:`server.errors.NotFound`, when no entry with the given *key* was found. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. :raises: :exc:`server.errors.PreconditionFailed`, if the *skey* could not be verified. 
""" if not securitykey.validate(skey, acceptSessionKey=True): raise errors.PreconditionFailed() skel = self.editSkel() if not skel.fromDB(key): raise errors.NotFound() if not self.canDelete(skel): raise errors.Unauthorized() self.deleteRecursive(key) skel.delete() self.onItemDeleted(skel) self.onItemChanged(skel) return self.render.deleteSuccess(skel) @exposed def view(self, *args, **kwargs): """ Prepares and renders a single entry for viewing. The entry is fetched by its entity key, which either is provided via *kwargs["key"]*, or as the first parameter in *args*. The function performs several access control checks on the requested entity before it is rendered. .. seealso:: :func:`viewSkel`, :func:`canView`, :func:`onItemViewed` :returns: The rendered representation of the requested entity. :raises: :exc:`server.errors.NotAcceptable`, when no *key* is provided. :raises: :exc:`server.errors.NotFound`, when no entry with the given *key* was found. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. """ if "key" in kwargs: key = kwargs["key"] elif len(args) >= 1: key = args[0] else: raise errors.NotAcceptable() if not len(key): raise errors.NotAcceptable() skel = self.viewSkel() if key == u"structure": # We dump just the structure of that skeleton, including it's default values if not self.canView(None): raise errors.Unauthorized() else: # We return a single entry for viewing if not skel.fromDB(key): raise errors.NotFound() if not self.canView(skel): raise errors.Unauthorized() self.onItemViewed(skel) return self.render.view(skel) @exposed def list(self, parent, *args, **kwargs): """ List the entries which are direct children of the given *parent*. Any other supplied parameters are interpreted as filters for the elements displayed. .. seealso:: :func:`canList`, :func:`server.db.mergeExternalFilter` :param parent: URL-safe key of the parent. 
:type parent: str :returns: The rendered list objects for the matching entries. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. :raises: :exc:`server.errors.NotFound`, if *parent* could not be found. """ if not parent or not self.canList(parent): raise errors.Unauthorized() parentSkel = self.viewSkel() if not parentSkel.fromDB(parent): if not str(parent) in [str(x["key"]) for x in self.getAvailableRootNodes()]: # It isn't a rootNode either raise errors.NotFound() else: parentSkel = None query = self.viewSkel().all() query.mergeExternalFilter(kwargs) query.filter("parententry", parent) return self.render.list(query.fetch(), parent=parent, parentSkel=parentSkel) @forceSSL @exposed def edit(self, *args, **kwargs): """ Modify an existing entry, and render the entry, eventually with error notes on incorrect data. Data is taken by any other arguments in *kwargs*. The entry is fetched by its entity key, which either is provided via *kwargs["key"]*, or as the first parameter in *args*. The function performs several access control checks on the requested entity before it is modified. .. seealso:: :func:`editSkel`, :func:`onItemEdited`, :func:`canEdit` :returns: The rendered, edited object of the entry, eventually with error hints. :raises: :exc:`server.errors.NotAcceptable`, when no *key* is provided. :raises: :exc:`server.errors.NotFound`, when no entry with the given *key* was found. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. :raises: :exc:`server.errors.PreconditionFailed`, if the *skey* could not be verified. 
""" if "skey" in kwargs: skey = kwargs["skey"] else: skey = "" if len(args) == 1: key = args[0] elif "key" in kwargs: key = kwargs["key"] else: raise errors.NotAcceptable() skel = self.editSkel() if not skel.fromDB(key): raise errors.NotAcceptable() if not self.canEdit(skel): raise errors.Unauthorized() if (len(kwargs) == 0 # no data supplied or skey == "" # no security key or not request.current.get().isPostRequest # failure if not using POST-method or not skel.fromClient(kwargs) # failure on reading into the bones or ("bounce" in kwargs and kwargs["bounce"] == "1") # review before changing ): return self.render.edit(skel) if not securitykey.validate(skey, acceptSessionKey=True): raise errors.PreconditionFailed() skel.toDB() # write it! self.onItemEdited(skel) self.onItemChanged(skel) return self.render.editItemSuccess(skel) @forceSSL @exposed def add(self, parent, *args, **kwargs): """ Add a new entry with the given parent, and render the entry, eventually with error notes on incorrect data. Data is taken by any other arguments in *kwargs*. The function performs several access control checks on the requested entity before it is added. .. seealso:: :func:`addSkel`, :func:`onItemAdded`, :func:`canAdd` :param parent: URL-safe key of the parent. :type parent: str :returns: The rendered, added object of the entry, eventually with error hints. :raises: :exc:`server.errors.NotAcceptable`, when no valid *parent* was found. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. :raises: :exc:`server.errors.PreconditionFailed`, if the *skey* could not be verified. 
""" if "skey" in kwargs: skey = kwargs["skey"] else: skey = "" if not self.isValidParent(parent): # Ensure the parent exists raise errors.NotAcceptable() if not self.canAdd(parent): raise errors.Unauthorized() skel = self.addSkel() if (len(kwargs) == 0 or skey == "" or not request.current.get().isPostRequest or not skel.fromClient(kwargs) or ("bounce" in kwargs and kwargs["bounce"] == "1") ): return self.render.add(skel) if not securitykey.validate(skey, acceptSessionKey=True): raise errors.PreconditionFailed() skel["parententry"] = str(parent) skel["parentrepo"] = str(self.getRootNode(parent).key()) key = skel.toDB() self.onItemAdded(skel) self.onItemChanged(skel) return self.render.addItemSuccess(skel) @forceSSL @exposed def clone(self, fromRepo, toRepo, fromParent=None, toParent=None, *args, **kwargs): """ Clones a hierarchy recursively. This function only initiates the cloning process, which is performed in the background. It states only a successful result when the clone action has been correctly initiated. :param fromRepo: URL-safe key of the key to the repository (=root-node Key) to clone from. :type fromRepo: str :param toRepo: URL-safe key of the key to the repository (=root-node Key) to clone to. :type toRepo: str :param fromParent: URL-safe key of the parent to clone from; for root nodes, this is equal \ to fromRepo, and can be omitted. :type fromParent: str :param toParent: URL-safe key of the parent to clone to; for root nodes, this is equal to \ toRepo, and can be omitted. :type toParent: str :returns: A rendered success result generated by the default renderer. :raises: :exc:`server.errors.NotAcceptable`, when no valid *parent* was found. :raises: :exc:`server.errors.Unauthorized`, if the current user does not have the required permissions. :raises: :exc:`server.errors.PreconditionFailed`, if the *skey* could not be verified. 
""" if "skey" in kwargs: skey = kwargs["skey"] else: skey = "" if fromParent is None: fromParent = fromRepo if toParent is None: toParent = toRepo if not (self.isValidParent(fromParent) and self.isValidParent(toParent)): # Ensure the parents exists raise errors.NotAcceptable() if not self.canAdd(toParent): raise errors.Unauthorized() if not securitykey.validate(skey, acceptSessionKey=True): raise errors.PreconditionFailed() self._clone(fromRepo, toRepo, fromParent, toParent) return self.render.cloneSuccess() @callDeferred def _clone(self, fromRepo, toRepo, fromParent, toParent): """ This is the internal cloning function that runs deferred and recursive. """ for node in self.viewSkel().all().filter("parententry =", fromParent).order("sortindex").run(99): old_key = str(node.key()) skel = self.addSkel() skel.fromDB(old_key) for k, v in skel.items(): logging.debug("BEFORE %s = >%s<", (k, skel[k])) skel = skel.clone() # skel.setValues( {}, key=None ) for k, v in skel.items(): logging.debug("BEHIND %s = >%s<", (k, skel[k])) skel["key"] = None skel["parententry"] = toParent skel["parentrepo"] = toRepo new_key = skel.toDB() self.onItemCloned(skel) self.onItemChanged(skel) self._clone(fromRepo, toRepo, old_key, new_key) ## Default accesscontrol functions def canAdd(self, parent): """ Access control function for adding permission. Checks if the current user has the permission to add a new entry to *parent*. The default behavior is: - If no user is logged in, adding is generally refused. - If the user has "root" access, adding is generally allowed. - If the user has the modules "add" permission (module-add) enabled, adding is allowed. It should be overridden for a module-specific behavior. .. seealso:: :func:`add` :param parent: URL-safe key of the parent node under which the element shall be added. :type parent: str :returns: True, if adding entries is allowed, False otherwise. 
:rtype: bool """ user = utils.getCurrentUser() if not user: return False if user["access"] and "root" in user["access"]: return True if user["access"] and "%s-add" % self.moduleName in user["access"]: return True return False def canPreview(self): """ Access control function for preview permission. Checks if the current user has the permission to preview an entry. The default behavior is: - If no user is logged in, previewing is generally refused. - If the user has "root" access, previewing is generally allowed. - If the user has the modules "add" or "edit" permission (module-add, module-edit) enabled, \ previewing is allowed. It should be overridden for module-specific behavior. .. seealso:: :func:`preview` :returns: True, if previewing entries is allowed, False otherwise. :rtype: bool """ user = utils.getCurrentUser() if not user: return False if user["access"] and "root" in user["access"]: return True if user["access"] and ("%s-edit" % self.moduleName in user["access"] or "%s-add" % self.moduleName in user["access"]): return True return False def canEdit(self, skel): """ Access control function for modification permission. Checks if the current user has the permission to edit an entry. The default behavior is: - If no user is logged in, editing is generally refused. - If the user has "root" access, editing is generally allowed. - If the user has the modules "edit" permission (module-edit) enabled, editing is allowed. It should be overridden for a module-specific behavior. .. seealso:: :func:`edit` :param skel: The Skeleton that should be edited. :type skel: :class:`server.skeleton.Skeleton` :returns: True, if editing entries is allowed, False otherwise. :rtype: bool """ user = utils.getCurrentUser() if not user: return False if user["access"] and "root" in user["access"]: return True if user["access"] and "%s-edit" % self.moduleName in user["access"]: return True return False def canView(self, skel): """ Access control function for viewing permission. 
Checks if the current user has the permission to view an entry. The default behavior is: - If no user is logged in, viewing is generally refused. - If the user has "root" access, viewing is generally allowed. - If the user has the modules "view" permission (module-view) enabled, viewing is allowed. If skel is None, it's a check if the current user is allowed to retrieve the skeleton structure from this module (ie. there is or could be at least one entry that is visible to that user) It should be overridden for a module-specific behavior. .. seealso:: :func:`view` :param skel: The Skeleton that should be viewed. :type skel: :class:`server.skeleton.Skeleton` | None :returns: True, if viewing is allowed, False otherwise. :rtype: bool """ user = utils.getCurrentUser() if not user: return False if user["access"] and "root" in user["access"]: return True if user["access"] and "%s-view" % self.moduleName in user["access"]: return True return False def canDelete(self, skel): """ Access control function for delete permission. Checks if the current user has the permission to delete an entry. The default behavior is: - If no user is logged in, deleting is generally refused. - If the user has "root" access, deleting is generally allowed. - If the user has the modules "deleting" permission (module-delete) enabled, \ deleting is allowed. It should be overridden for a module-specific behavior. :param skel: The Skeleton that should be deleted. :type skel: :class:`server.skeleton.Skeleton` .. seealso:: :func:`delete` :returns: True, if deleting entries is allowed, False otherwise. :rtype: bool """ user = utils.getCurrentUser() if not user: return False if user["access"] and "root" in user["access"]: return True if user["access"] and "%s-delete" % self.moduleName in user["access"]: return True return False def canSetIndex(self, item, index): """ Access control function for changing order permission. Checks if the current user has the permission to change the ordering of an entry. 
The default behavior is: - If no user is logged in, any modification is generally refused. - If the user has "root" access, modification is generally allowed. - If the user has the modules "edit" or "add" permission (module-edit, module-add) enabled, \ modification is allowed. It should be overridden for a module-specific behavior. :param item: URL-safe key of the entry. :type item: str :param item: New sortindex for this item. :type item: float .. seealso:: :func:`setIndex` :returns: True, if changing the order of entries is allowed, False otherwise. :rtype: bool """ user = utils.getCurrentUser() if not user: return (False) if user["access"] and "root" in user["access"]: return (True) if user["access"] and ( "%s-edit" % self.moduleName in user["access"] or "%s-add" % self.moduleName in user["access"]): return (True) return (False) def canList(self, parent): """ Access control function for listing permission. Checks if the current user has the permission to list the children of the given *parent*. The default behavior is: - If no user is logged in, listing is generally refused. - If the user has "root" access, listing is generally allowed. - If the user has the modules "view" permission (module-view) enabled, listing is allowed. It should be overridden for a module-specific behavior. .. seealso:: :func:`list` :param parent: URL-safe key of the parent. :type parent: str :returns: True, if listing is allowed, False otherwise. :rtype: bool """ user = utils.getCurrentUser() if not user: return False if user["access"] and "root" in user["access"]: return True if user["access"] and "%s-view" % self.moduleName in user["access"]: return True return False def canReparent(self, item, dest): """ Access control function for item moving permission. Checks if the current user has the permission to move *item* to *dest*. The default behavior is: - If no user is logged in, any modification is generally refused. - If the user has "root" access, modification is generally allowed. 
- If the user has the modules "edit" permission (module-edit) enabled, moving is allowed. It should be overridden for a module-specific behavior. :param item: URL-safe key of the entry. :type item: str :param item: URL-safe key of the new parent to be moved to. :type item: float .. seealso:: :func:`reparent` :returns: True, if changing the order of entries is allowed, False otherwise. :rtype: bool """ user = utils.getCurrentUser() if not user: return False if user["access"] and "root" in user["access"]: return True if user["access"] and "%s-edit" % self.moduleName in user["access"]: return True return False ## Overridable eventhooks def onItemAdded(self, skel): """ Hook function that is called after adding an entry. It should be overridden for a module-specific behavior. The default is writing a log entry. :param skel: The Skeleton that has been added. :type skel: :class:`server.skeleton.Skeleton` .. seealso:: :func:`add` """ logging.info("Entry added: %s" % skel["key"]) user = utils.getCurrentUser() if user: logging.info("User: %s (%s)" % (user["name"], user["key"])) def onItemEdited(self, skel): """ Hook function that is called after modifying an entry. It should be overridden for a module-specific behavior. The default is writing a log entry. :param skel: The Skeleton that has been modified. :type skel: :class:`server.skeleton.Skeleton` .. seealso:: :func:`edit` """ logging.info("Entry changed: %s" % skel["key"]) user = utils.getCurrentUser() if user: logging.info("User: %s (%s)" % (user["name"], user["key"])) def onItemViewed(self, skel): """ Hook function that is called when viewing an entry. It should be overridden for a module-specific behavior. The default is doing nothing. :param skel: The Skeleton that is viewed. :type skel: :class:`server.skeleton.Skeleton` .. seealso:: :func:`view` """ pass def onItemDeleted(self, skel): """ Hook function that is called after deleting an entry. It should be overridden for a module-specific behavior. 
The default is writing a log entry. :param skel: The Skeleton that has been deleted. :type skel: :class:`server.skeleton.Skeleton` .. seealso:: :func:`delete` """ logging.info("Entry deleted: %s" % skel["key"]) user = utils.getCurrentUser() if user: logging.info("User: %s (%s)" % (user["name"], user["key"])) def onItemReparent(self, skel): """ Hook function that is called after reparenting an entry. It should be overridden for a module-specific behavior. The default is writing a log entry. :param skel: The Skeleton that has been reparented. :type skel: :class:`server.skeleton.Skeleton` .. seealso:: :func:`reparent` """ logging.debug("data: %r, %r", skel, skel.keys()) logging.info("Entry reparented: %s" % skel["key"]) user = utils.getCurrentUser() if user: logging.info("User: %s (%s)" % (user["name"], user["key"])) def onItemChanged(self, skel): """ Hook function that is called after changing an entry. It should be overridden for a module-specific behavior. The default is doing nothing because it is additional to the other on* functions. :param skel: The Skeleton that has been deleted. :type skel: :class:`server.skeleton.Skeleton` """ pass def onItemSetIndex(self, skel): """ Hook function that is called after setting a new index an entry. It should be overridden for a module-specific behavior. The default is writing a log entry. :param skel: The Skeleton that has got a new index. :type skel: :class:`server.skeleton.Skeleton` .. seealso:: :func:`setIndex` """ logging.info("Entry has a new index: %s" % skel["key"]) user = utils.getCurrentUser() if user: logging.info("User: %s (%s)" % (user["name"], user["key"])) def onItemCloned(self, skel): """ Hook function that is called after cloning an entry. It should be overridden for a module-specific behavior. The default is writing a log entry. :param skel: The Skeleton that has been cloned. :type skel: :class:`server.skeleton.Skeleton` .. 
seealso:: :func:`_clone` """ logging.info("Entry cloned: %s" % skel["key"]) user = utils.getCurrentUser() if user: logging.info("User: %s (%s)" % (user["name"], user["key"])) ## Renderer specific stuff def jinjaEnv(self, env): """ Provides some additional Jinja2 template functions for hierarchy applications. These function are: - :func:`pathToKey()` alias *getPathToKey()* - :func:`canAdd()` - :func:`canPreview()` - :func:`canEdit()` - :func:`canView()` - :func:`canDelete()` - :func:`canSetIndex()` - :func:`canList()` - :func:`canReparent()` ..warning:: It is important to call the super-class-function of Hierarchy when this function is overridden from a sub-classed module. """ env.globals["getPathToKey"] = self.pathToKey env.globals["canAdd"] = self.canAdd env.globals["canPreview"] = self.canPreview env.globals["canEdit"] = self.canEdit env.globals["canView"] = self.canView env.globals["canDelete"] = self.canDelete env.globals["canSetIndex"] = self.canSetIndex env.globals["canList"] = self.canList env.globals["canReparent"] = self.canReparent return env Hierarchy.admin = True Hierarchy.html = True Hierarchy.vi = True
lgpl-3.0
RalphBariz/RalphsDotNet
Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/nose/plugins/plugintest.py
3
11079
""" Testing Plugins =============== The plugin interface is well-tested enough to safely unit test your use of its hooks with some level of confidence. However, there is also a mixin for unittest.TestCase called PluginTester that's designed to test plugins in their native runtime environment. Here's a simple example with a do-nothing plugin and a composed suite. >>> import unittest >>> from nose.plugins import Plugin, PluginTester >>> class FooPlugin(Plugin): ... pass >>> class TestPluginFoo(PluginTester, unittest.TestCase): ... activate = '--with-foo' ... plugins = [FooPlugin()] ... def test_foo(self): ... for line in self.output: ... # i.e. check for patterns ... pass ... ... # or check for a line containing ... ... assert "ValueError" in self.output ... def makeSuite(self): ... class TC(unittest.TestCase): ... def runTest(self): ... raise ValueError("I hate foo") ... return unittest.TestSuite([TC()]) ... >>> res = unittest.TestResult() >>> case = TestPluginFoo('test_foo') >>> case(res) >>> res.errors [] >>> res.failures [] >>> res.wasSuccessful() True >>> res.testsRun 1 And here is a more complex example of testing a plugin that has extra arguments and reads environment variables. >>> import unittest, os >>> from nose.plugins import Plugin, PluginTester >>> class FancyOutputter(Plugin): ... name = "fancy" ... def configure(self, options, conf): ... Plugin.configure(self, options, conf) ... if not self.enabled: ... return ... self.fanciness = 1 ... if options.more_fancy: ... self.fanciness = 2 ... if 'EVEN_FANCIER' in self.env: ... self.fanciness = 3 ... ... def options(self, parser, env=os.environ): ... self.env = env ... parser.add_option('--more-fancy', action='store_true') ... Plugin.options(self, parser, env=env) ... ... def report(self, stream): ... stream.write("FANCY " * self.fanciness) ... >>> class TestFancyOutputter(PluginTester, unittest.TestCase): ... activate = '--with-fancy' # enables the plugin ... plugins = [FancyOutputter()] ... 
args = ['--more-fancy'] ... env = {'EVEN_FANCIER': '1'} ... ... def test_fancy_output(self): ... assert "FANCY FANCY FANCY" in self.output, ( ... "got: %s" % self.output) ... def makeSuite(self): ... class TC(unittest.TestCase): ... def runTest(self): ... raise ValueError("I hate fancy stuff") ... return unittest.TestSuite([TC()]) ... >>> res = unittest.TestResult() >>> case = TestFancyOutputter('test_fancy_output') >>> case(res) >>> res.errors [] >>> res.failures [] >>> res.wasSuccessful() True >>> res.testsRun 1 """ import re import sys from warnings import warn try: from cStringIO import StringIO except ImportError: from StringIO import StringIO __all__ = ['PluginTester', 'run'] class PluginTester(object): """A mixin for testing nose plugins in their runtime environment. Subclass this and mix in unittest.TestCase to run integration/functional tests on your plugin. When setUp() is called, the stub test suite is executed with your plugin so that during an actual test you can inspect the artifacts of how your plugin interacted with the stub test suite. - activate - the argument to send nosetests to activate the plugin - suitepath - if set, this is the path of the suite to test. Otherwise, you will need to use the hook, makeSuite() - plugins - the list of plugins to make available during the run. Note that this does not mean these plugins will be *enabled* during the run -- only the plugins enabled by the activate argument or other settings in argv or env will be enabled. - args - a list of arguments to add to the nosetests command, in addition to the activate argument - env - optional dict of environment variables to send nosetests """ activate = None suitepath = None args = None env = {} argv = None plugins = [] ignoreFiles = None def makeSuite(self): """returns a suite object of tests to run (unittest.TestSuite()) If self.suitepath is None, this must be implemented. The returned suite object will be executed with all plugins activated. It may return None. 
Here is an example of a basic suite object you can return :: >>> import unittest >>> class SomeTest(unittest.TestCase): ... def runTest(self): ... raise ValueError("Now do something, plugin!") ... >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]> """ raise NotImplementedError def _execPlugin(self): """execute the plugin on the internal test suite. """ from nose.config import Config from nose.core import TestProgram from nose.plugins.manager import PluginManager suite = None stream = StringIO() conf = Config(env=self.env, stream=stream, plugins=PluginManager(plugins=self.plugins)) if self.ignoreFiles is not None: conf.ignoreFiles = self.ignoreFiles if not self.suitepath: suite = self.makeSuite() self.nose = TestProgram(argv=self.argv, config=conf, suite=suite, exit=False) self.output = AccessDecorator(stream) def setUp(self): """runs nosetests with the specified test suite, all plugins activated. """ self.argv = ['nosetests', self.activate] if self.args: self.argv.extend(self.args) if self.suitepath: self.argv.append(self.suitepath) self._execPlugin() class AccessDecorator(object): stream = None _buf = None def __init__(self, stream): self.stream = stream stream.seek(0) self._buf = stream.read() stream.seek(0) def __contains__(self, val): return val in self._buf def __iter__(self): return self.stream def __str__(self): return self._buf def blankline_separated_blocks(text): block = [] for line in text.splitlines(True): block.append(line) if not line.strip(): yield "".join(block) block = [] if block: yield "".join(block) def remove_stack_traces(out): # this regexp taken from Python 2.5's doctest traceback_re = re.compile(r""" # Grab the traceback header. Different versions of Python have # said different things on the first traceback line. ^(?P<hdr> Traceback\ \( (?: most\ recent\ call\ last | innermost\ last ) \) : ) \s* $ # toss trailing whitespace on the header. (?P<stack> .*?) 
# don't blink: absorb stuff until... ^ (?P<msg> \w+ .*) # a line *starts* with alphanum. """, re.VERBOSE | re.MULTILINE | re.DOTALL) blocks = [] for block in blankline_separated_blocks(out): blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<msg>", block)) return "".join(blocks) def simplify_warnings(out): warn_re = re.compile(r""" # Cut the file and line no, up to the warning name ^.*:\d+:\s (?P<category>\w+): \s+ # warning category (?P<detail>.+) $ \n? # warning message ^ .* $ # stack frame """, re.VERBOSE | re.MULTILINE) return warn_re.sub(r"\g<category>: \g<detail>", out) def remove_timings(out): return re.sub( r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out) def munge_nose_output_for_doctest(out): """Modify nose output to make it easy to use in doctests.""" out = remove_stack_traces(out) out = simplify_warnings(out) out = remove_timings(out) return out.strip() def run(*arg, **kw): """ Specialized version of nose.run for use inside of doctests that test test runs. This version of run() prints the result output to stdout. Before printing, the output is processed by replacing the timing information with an ellipsis (...), removing traceback stacks, and removing trailing whitespace. Use this version of run wherever you are writing a doctest that tests nose (or unittest) test result output. Note: do not use doctest: +ELLIPSIS when testing nose output, since ellipses ("test_foo ... ok") in your expected test runner output may match multiple lines of output, causing spurious test passes! 
""" from nose import run from nose.config import Config from nose.plugins.manager import PluginManager buffer = StringIO() if 'config' not in kw: plugins = kw.pop('plugins', []) if isinstance(plugins, list): plugins = PluginManager(plugins=plugins) env = kw.pop('env', {}) kw['config'] = Config(env=env, plugins=plugins) if 'argv' not in kw: kw['argv'] = ['nosetests', '-v'] kw['config'].stream = buffer # Set up buffering so that all output goes to our buffer, # or warn user if deprecated behavior is active. If this is not # done, prints and warnings will either be out of place or # disappear. stderr = sys.stderr stdout = sys.stdout if kw.pop('buffer_all', False): sys.stdout = sys.stderr = buffer restore = True else: restore = False warn("The behavior of nose.plugins.plugintest.run() will change in " "the next release of nose. The current behavior does not " "correctly account for output to stdout and stderr. To enable " "correct behavior, use run_buffered() instead, or pass " "the keyword argument buffer_all=True to run().", DeprecationWarning, stacklevel=2) try: run(*arg, **kw) finally: if restore: sys.stderr = stderr sys.stdout = stdout out = buffer.getvalue() print munge_nose_output_for_doctest(out) def run_buffered(*arg, **kw): kw['buffer_all'] = True run(*arg, **kw) if __name__ == '__main__': import doctest doctest.testmod()
gpl-3.0
ChanderG/scipy
scipy/spatial/_plotutils.py
53
4034
from __future__ import division, print_function, absolute_import import numpy as np from scipy._lib.decorator import decorator as _decorator __all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d'] @_decorator def _held_figure(func, obj, ax=None, **kw): import matplotlib.pyplot as plt if ax is None: fig = plt.figure() ax = fig.gca() was_held = ax.ishold() try: ax.hold(True) return func(obj, ax=ax, **kw) finally: ax.hold(was_held) def _adjust_bounds(ax, points): ptp_bound = points.ptp(axis=0) ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0], points[:,0].max() + 0.1*ptp_bound[0]) ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1], points[:,1].max() + 0.1*ptp_bound[1]) @_held_figure def delaunay_plot_2d(tri, ax=None): """ Plot the given Delaunay triangulation in 2-D Parameters ---------- tri : scipy.spatial.Delaunay instance Triangulation to plot ax : matplotlib.axes.Axes instance, optional Axes to plot on Returns ------- fig : matplotlib.figure.Figure instance Figure for the plot See Also -------- Delaunay matplotlib.pyplot.triplot Notes ----- Requires Matplotlib. """ if tri.points.shape[1] != 2: raise ValueError("Delaunay triangulation is not 2-D") ax.plot(tri.points[:,0], tri.points[:,1], 'o') ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy()) _adjust_bounds(ax, tri.points) return ax.figure @_held_figure def convex_hull_plot_2d(hull, ax=None): """ Plot the given convex hull diagram in 2-D Parameters ---------- hull : scipy.spatial.ConvexHull instance Convex hull to plot ax : matplotlib.axes.Axes instance, optional Axes to plot on Returns ------- fig : matplotlib.figure.Figure instance Figure for the plot See Also -------- ConvexHull Notes ----- Requires Matplotlib. 
""" if hull.points.shape[1] != 2: raise ValueError("Convex hull is not 2-D") ax.plot(hull.points[:,0], hull.points[:,1], 'o') for simplex in hull.simplices: ax.plot(hull.points[simplex,0], hull.points[simplex,1], 'k-') _adjust_bounds(ax, hull.points) return ax.figure @_held_figure def voronoi_plot_2d(vor, ax=None): """ Plot the given Voronoi diagram in 2-D Parameters ---------- vor : scipy.spatial.Voronoi instance Diagram to plot ax : matplotlib.axes.Axes instance, optional Axes to plot on Returns ------- fig : matplotlib.figure.Figure instance Figure for the plot See Also -------- Voronoi Notes ----- Requires Matplotlib. """ if vor.points.shape[1] != 2: raise ValueError("Voronoi diagram is not 2-D") ax.plot(vor.points[:,0], vor.points[:,1], '.') ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o') for simplex in vor.ridge_vertices: simplex = np.asarray(simplex) if np.all(simplex >= 0): ax.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], 'k-') ptp_bound = vor.points.ptp(axis=0) center = vor.points.mean(axis=0) for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices): simplex = np.asarray(simplex) if np.any(simplex < 0): i = simplex[simplex >= 0][0] # finite end Voronoi vertex t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent t /= np.linalg.norm(t) n = np.array([-t[1], t[0]]) # normal midpoint = vor.points[pointidx].mean(axis=0) direction = np.sign(np.dot(midpoint - center, n)) * n far_point = vor.vertices[i] + direction * ptp_bound.max() ax.plot([vor.vertices[i,0], far_point[0]], [vor.vertices[i,1], far_point[1]], 'k--') _adjust_bounds(ax, vor.points) return ax.figure
bsd-3-clause
jo-fu/tl-generator-allinone
ternip/rule_engine/expressions.py
3
2820
#!/usr/bin/env python # predef re's for replacing ORDINAL_WORDS = r'(tenth|eleventh|twelfth|thirteenth|fourteenth|fifteenth|sixteenth|seventeenth|eighteenth|nineteenth|twentieth|twenty-first|twenty-second|twenty-third|twenty-fourth|twenty-fifth|twenty-sixth|twenty-seventh|twenty-eighth|twenty-ninth|thirtieth|thirty-first|first|second|third|fourth|fifth|sixth|seventh|eighth|ninth)' ORDINAL_NUMS = r'([23]?1-?st|11-?th|[23]?2-?nd|12-?th|[12]?3-?rd|13-?th|[12]?[4-90]-?th|30-?th)' DAYS = r'(monday|tuesday|wednesday|thursday|friday|saturday|sunday)' MONTHS = r'(january|february|march|april|may|june|july|august|september|october|november|december)' MONTH_ABBRS = r'(jan|feb|mar|apr|may|jun|jul|aug|sep|sept|oct|nov|dec)\.?' RELATIVE_DAYS = r'(today|yesterday|tomorrow|tonight|tonite)' DAY_HOLIDAYS = r'(election|memorial|C?Hanukk?ah|Rosh|Kippur|tet|diwali|halloween)' NTH_DOW_HOLIDAYS = r'(mlk|king|president|canberra|mother|father|labor|columbus|thanksgiving)' FIXED_HOLIDAYS = r'(<new~.+><year~.+>|<inauguration~.+>|<valentine~.+>|<ground~.+>|<candlemas~.+>|<patrick~.+>|<fool~.+>|<(saint|st\.)~.+><george~.+>|<walpurgisnacht~.+>|<may~.+><day~.+>|<beltane~.+>|<cinco~.+>|<flag~.+>|<baptiste~.+>|<canada~.+>|<dominion~.+>|<independence~.+>|<bastille~.+>|<halloween~.+>|<allhallow~.+>|<all~.+><(saint|soul)s~.+>|<day~.+><of~.+><the~.+><dead~.+>|<fawkes~.+>|<veteran~.+>|<christmas~.+>|<xmas~.+>|<boxing~.+>)' LUNAR_HOLIDAYS = r'(<easter~.+>|<palm~.+><sunday~.+>|<good~.+><friday~.+>|<ash~.+><wednesday~.+>|<shrove~.+><tuesday~.+>|<mardis~.+><gras~.+>)' NUMBER_TERM = 
r'(one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|hundred|thousand|million|billion|trillion|first|second|third|fourth|fifth|sixth|seventh|eighth|ninth|tenth|eleventh|twelfth|thirteenth|fourteenth|fifteenth|sixteenth|seventeenth|eighteenth|nineteenth|twentieth|thirtieth|fortieth|fiftieth|sixtieth|seventieth|eightieth|ninetieth|hundreth|thousandth|millionth|billionth|trillionth)' ORD_UNIT_NUMS = r'(first|second|third|fourth|fifth|sixth|seventh|eighth|ninth)' ORD_OTHER_NUMS = r'(tenth|eleventh|twelfth|thirteenth|fourteenth|fifteenth|sixteenth|seventeenth|eighteenth|nineteenth|twentieth|thirtieth|fortieth|fiftieth|sixtieth|seventieth|eightieth|ninetieth|hundreth|thousandth|millionth|billionth|trillionth)' HIGHER_NUMS = r'(hundred|thousand|million|billion|trillion)' UNIT_NUMS = r'(one|two|three|four|five|six|seven|eight|nine)' UNIQUE_NUMS = r'(ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen)' TENS_NUMS = r'(twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety)' UNITS = r'(second|minute|hour|day|month|year|week|fortnight|decade|centur(y|ie)|milleni(um|a))'
mit
Thraxis/pymedusa
lib/rebulk/test/test_toposort.py
36
4042
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2014 True Blade Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Original: # - https://bitbucket.org/ericvsmith/toposort (1.4) # Modifications: # - port to pytest # pylint: skip-file import pytest from ..toposort import toposort, toposort_flatten, CyclicDependency class TestCase(object): def test_simple(self): results = list(toposort({2: set([11]), 9: set([11, 8]), 10: set([11, 3]), 11: set([7, 5]), 8: set([7, 3])})) expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])] assert results == expected # make sure self dependencies are ignored results = list(toposort({2: set([2, 11]), 9: set([11, 8]), 10: set([10, 11, 3]), 11: set([7, 5]), 8: set([7, 3])})) expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])] assert results == expected assert list(toposort({1: set()})) == [set([1])] assert list(toposort({1: set([1])})) == [set([1])] def test_no_dependencies(self): assert list(toposort({1: set([2]), 3: set([4]), 5: set([6])})) == [set([2, 4, 6]), set([1, 3, 5])] assert list(toposort({1: set(), 3: set(), 5: set()})) == [set([1, 3, 5])] def test_empty(self): assert list(toposort({})) == [] def test_strings(self): results = list(toposort({'2': set(['11']), '9': set(['11', '8']), '10': set(['11', '3']), '11': set(['7', '5']), '8': set(['7', '3'])})) expected = [set(['3', '5', '7']), set(['8', '11']), set(['2', '9', '10'])] assert results == expected def test_objects(self): o2 = object() o3 = object() o5 = object() o7 = object() o8 = object() o9 = object() o10 = object() o11 = object() results = list(toposort({o2: set([o11]), o9: set([o11, o8]), o10: set([o11, o3]), o11: set([o7, o5]), o8: set([o7, o3, o8])})) expected = [set([o3, o5, o7]), set([o8, o11]), set([o2, o9, o10])] assert results == expected def 
test_cycle(self): # a simple, 2 element cycle with pytest.raises(CyclicDependency): list(toposort({1: set([2]), 2: set([1])})) # an indirect cycle with pytest.raises(CyclicDependency): list(toposort({1: set([2]), 2: set([3]), 3: set([1])})) def test_input_not_modified(self): data = {2: set([11]), 9: set([11, 8]), 10: set([11, 3]), 11: set([7, 5]), 8: set([7, 3, 8]), # includes something self-referential } orig = data.copy() results = list(toposort(data)) assert data == orig def test_input_not_modified_when_cycle_error(self): data = {1: set([2]), 2: set([1]), 3: set([4]), } orig = data.copy() with pytest.raises(CyclicDependency): list(toposort(data)) assert data == orig class TestCaseAll(object): def test_sort_flatten(self): data = {2: set([11]), 9: set([11, 8]), 10: set([11, 3]), 11: set([7, 5]), 8: set([7, 3, 8]), # includes something self-referential } expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])] assert list(toposort(data)) == expected # now check the sorted results results = [] for item in expected: results.extend(sorted(item)) assert toposort_flatten(data) == results # and the unsorted results. break the results up into groups to compare them actual = toposort_flatten(data, False) results = [set([i for i in actual[0:3]]), set([i for i in actual[3:5]]), set([i for i in actual[5:8]])] assert results == expected
gpl-3.0
Jgarcia-IAS/Fidelizacion_odoo
openerp/report/render/html2html/__init__.py
381
1091
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from html2html import parseString # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
nathanial/lettuce
tests/integration/lib/Django-1.2.5/django/contrib/gis/geos/prototypes/predicates.py
623
1777
""" This module houses the GEOS ctypes prototype functions for the unary and binary predicate operations on geometries. """ from ctypes import c_char, c_char_p, c_double from django.contrib.gis.geos.libgeos import GEOM_PTR from django.contrib.gis.geos.prototypes.errcheck import check_predicate from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc ## Binary & unary predicate functions ## def binary_predicate(func, *args): "For GEOS binary predicate functions." argtypes = [GEOM_PTR, GEOM_PTR] if args: argtypes += args func.argtypes = argtypes func.restype = c_char func.errcheck = check_predicate return func def unary_predicate(func): "For GEOS unary predicate functions." func.argtypes = [GEOM_PTR] func.restype = c_char func.errcheck = check_predicate return func ## Unary Predicates ## geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ')) geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty')) geos_isring = unary_predicate(GEOSFunc('GEOSisRing')) geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple')) geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid')) ## Binary Predicates ## geos_contains = binary_predicate(GEOSFunc('GEOSContains')) geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses')) geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint')) geos_equals = binary_predicate(GEOSFunc('GEOSEquals')) geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double) geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects')) geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps')) geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p) geos_touches = binary_predicate(GEOSFunc('GEOSTouches')) geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
gpl-3.0
jpshort/odoo
addons/crm/wizard/crm_phonecall_to_meeting.py
381
2704
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv from openerp.tools.translate import _ class crm_phonecall2meeting(osv.osv_memory): """ Phonecall to Meeting """ _name = 'crm.phonecall2meeting' _description = 'Phonecall To Meeting' def action_cancel(self, cr, uid, ids, context=None): """ Closes Phonecall to Meeting form @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of Phonecall to Meeting IDs @param context: A standard dictionary for contextual values """ return {'type':'ir.actions.act_window_close'} def action_make_meeting(self, cr, uid, ids, context=None): """ This opens Meeting's calendar view to schedule meeting on current Phonecall @return : Dictionary value for created Meeting view """ res = {} phonecall_id = context and context.get('active_id', False) or False if phonecall_id: phonecall = self.pool.get('crm.phonecall').browse(cr, uid, phonecall_id, context) res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 
'action_calendar_event', context) res['context'] = { 'default_phonecall_id': phonecall.id, 'default_partner_id': phonecall.partner_id and phonecall.partner_id.id or False, 'default_user_id': uid, 'default_email_from': phonecall.email_from, 'default_state': 'open', 'default_name': phonecall.name, } return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
rytmis/dotless
lib/PEG_GrammarExplorer/PEG_GrammarExplorer/PegSamples/python_2_5_2/input/adwords/awapi_python_samples_1.0.0/src/get_all_campaigns.py
6
2106
#!/usr/bin/python # # Copyright 2008, Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This code sample retrieves information about all campaigns that belongs to the customer issuing the request.""" import SOAPpy # Provide AdWords login information. email = 'INSERT_LOGIN_EMAIL_HERE' password = 'INSERT_PASSWORD_HERE' client_email = 'INSERT_CLIENT_LOGIN_EMAIL_HERE' useragent = 'INSERT_COMPANY_NAME: AdWords API Python Sample Code' developer_token = 'INSERT_DEVELOPER_TOKEN_HERE' application_token = 'INSERT_APPLICATION_TOKEN_HERE' # Define SOAP headers. headers = SOAPpy.Types.headerType() headers.email = email headers.password = password headers.clientEmail = client_email headers.useragent = useragent headers.developerToken = developer_token headers.applicationToken = application_token # Set up service connection. To view XML request/response, change value of # campaign_service.config.debug to 1. To send requests to production # environment, replace "sandbox.google.com" with "adwords.google.com". namespace = 'https://sandbox.google.com/api/adwords/v12' campaign_service = SOAPpy.SOAPProxy(namespace + '/CampaignService', header=headers) campaign_service.config.debug = 0 # Get all campaigns. campaigns = campaign_service.getAllAdWordsCampaigns(0) # Convert to a list if we get back a single object. if len(campaigns) > 0 and not isinstance(campaigns, list): campaigns = [campaigns] # Display campaign info. 
for campaign in campaigns: print 'Campaign name is "%s" id is "%s".' % (campaign['name'], campaign['id'])
apache-2.0
ppke-nlpg/AnaGramma-Parser
engine/utils.py
1
3130
#!/usr/bin/python3 # -*- coding: utf-8, vim: expandtab:ts=4 -*- from itertools import chain from nltk.featstruct import FeatStruct, CustomFeatureValue, UnificationFailure from typing import Any def update_nested_frozen_fs(fs, dictinoary): fs = {k: v for k, v in fs.items() if k not in dictinoary.keys()} fs.update(dictinoary) return nested_frozen_fs(fs) def nested_frozen_fs(dictionary): if not isinstance(dictionary, FeatStruct): ret = FeatStruct() for k, v in dictionary.items(): v_new = v if isinstance(v_new, set): v_new = frozenset(v_new) elif isinstance(v_new, dict): v_new = nested_frozen_fs(v_new) ret[k] = v_new ret.freeze() return ret else: dictionary.freeze() return dictionary class UnifiableSet(CustomFeatureValue): """ Simple Set union on unify() the internal unification is handled elsewhere. TODO: Maybe do it here? """ def __init__(self, data): self.data = frozenset([data]) self._frozen = True super().__init__() def unify(self, other): if not isinstance(other, self.__class__): return UnificationFailure data = [i for i in sorted(chain(self.data, other.data))] self.data = frozenset(unify_till_pass(data)) # Union return self def pop(self): return set(self.data).pop() def __repr__(self): return str(self.data) def __str__(self): return str(self.data) def __eq__(self, other): if not isinstance(other, self.__class__): return False return self.data == other.data def __lt__(self, other): if not isinstance(other, self.__class__): return True return self.data < other.data # isSubset def __hash__(self): return hash(self.data) def __iter__(self): return iter(self.data) def frozen(self): return self._frozen def flatten(it: [[Any]]) -> [Any]: return list(chain.from_iterable(it)) def unify_till_pass(actions): actions.sort() passed_actions = [] while len(actions) > 1: # There is something to unify... first = actions.pop(0) to_pop = None new = None for second in actions: # Unify with all the remaining... 
success, new = first.unify_searchers(second) if success: to_pop = second # When succeded, remember to remove from the list of actions break else: passed_actions.append(first) # One round "Pass" completed for this element... if to_pop is not None: # Unification success... actions.remove(to_pop) # Remove the one which has been unified actions.append(new) # Add the newly created one # actions.extend(passed_actions) # todo: Reuse "passed" actions until every action is "passed"... # passed_actions = [] if len(actions) == 1: passed_actions.append(actions.pop()) actions = passed_actions return actions
lgpl-3.0
anaruse/chainer
tests/chainer_tests/functions_tests/connection_tests/test_embed_id.py
2
4318
import unittest import numpy import chainer from chainer.backends import cuda from chainer.functions.connection import embed_id from chainer import gradient_check from chainer import testing from chainer.testing import attr @testing.parameterize(*testing.product_dict( [{'x_data': [0, 1, 0], 'ignore_label': None}, {'x_data': [[0, 1, 0], [1, 0, 1]], 'ignore_label': None}, {'x_data': [0, 1, -1], 'ignore_label': -1}, {'x_data': [[0, 1, -1], [-1, 0, 1]], 'ignore_label': -1}], [{'label_dtype': numpy.int8}, {'label_dtype': numpy.int16}, {'label_dtype': numpy.int32}, {'label_dtype': numpy.int64}] )) class TestEmbedID(unittest.TestCase): def setUp(self): self.x = numpy.array(self.x_data, dtype=self.label_dtype) self.W = numpy.random.uniform(-1, 1, (3, 2)).astype('f') y_shape = self.x.shape + (2,) self.gy = numpy.random.uniform(-1, 1, y_shape).astype(numpy.float32) self.ggW = numpy.random.uniform(-1, 1, (3, 2)).astype('f') self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-2} self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2} def check_forward(self, x_data, W_data): x = chainer.Variable(x_data) W = chainer.Variable(W_data) y = chainer.functions.embed_id(x, W, self.ignore_label) self.assertEqual(y.data.dtype, numpy.float32) y_expect = numpy.empty_like(self.gy) for i in numpy.ndindex(self.x.shape): if self.x[i] == -1: y_expect[i] = 0 else: y_expect[i] = self.W[int(self.x[i])] testing.assert_allclose(y_expect, y.data, atol=0, rtol=0) def test_forward_cpu(self): self.check_forward(self.x, self.W) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.W)) def check_backward(self, x_data, W_data, y_grad): def f(x, W): return chainer.functions.embed_id(x, W, self.ignore_label) gradient_check.check_backward( f, (x_data, W_data), y_grad, dtype=numpy.float64, **self.check_backward_options) def test_backward_cpu(self): self.check_backward(self.x, self.W, self.gy) @attr.gpu def test_backward_gpu(self): self.check_backward( 
cuda.to_gpu(self.x), cuda.to_gpu(self.W), cuda.to_gpu(self.gy)) def check_double_backward(self, x_data, W_data, gy_data, ggW_data): def f(W): y = chainer.functions.embed_id( x_data, W, self.ignore_label) return y * y gradient_check.check_double_backward( f, W_data, gy_data, ggW_data, **self.check_double_backward_options) def test_double_backward_cpu(self): self.check_double_backward(self.x, self.W, self.gy, self.ggW) @attr.gpu def test_double_backward_gpu(self): self.check_double_backward( cuda.to_gpu(self.x), cuda.to_gpu(self.W), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggW)) @testing.parameterize( {'x_data': [0, 1, 0], 'ignore_label': None}, {'x_data': [[0, 1, 0], [1, 0, 1]], 'ignore_label': None}, {'x_data': [0, 1, -1], 'ignore_label': -1}, {'x_data': [[0, 1, -1], [-1, 0, 1]], 'ignore_label': -1}, {'x_data': [0, 1, 2], 'ignore_label': 2}, {'x_data': [[0, 1, 0], [1, 0, 1]], 'ignore_label': 1}, ) class TestEmbedIdGrad(unittest.TestCase): n_unit = (4,) w_shape = (4, 2) def setUp(self): self.x = numpy.array(self.x_data, dtype='i') self.gy = numpy.random.uniform( -1, 1, self.x.shape + (2,)).astype('f') self.ggW = numpy.random.uniform(-1, 1, self.w_shape).astype('f') def check_backward(self, x, gy, ggW): return def f(x, gy): emb = embed_id.EmbedIDGrad( self.w_shape, self.ignore_label) return emb.apply((x, numpy.zeros(()), gy))[0] gradient_check.check_backward(f, (x, gy), (ggW,)) def test_backward_cpu(self): self.check_backward(self.x, self.gy, self.ggW) @attr.gpu def test_backward_gpu(self): self.check_backward( cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggW)) testing.run_module(__name__, __file__)
mit
Nowheresly/odoo
addons/mass_mailing/tests/test_mail.py
388
1221
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.mail.tests.common import TestMail class test_message_compose(TestMail): def test_OO_mail_mail_tracking(self): """ Tests designed for mail_mail tracking (opened, replied, bounced) """ pass
agpl-3.0
daiz713/Apricot
make_ui_table.py
1
3689
#!/usr/bin/python # -*- coding: utf-8 -*- # Project Apricot # Copyright (c) 2015 daiz. # import sys import os.path from PIL import Image # 末尾改行なしで出力する(print no \n) def printnn(value): sys.stdout.write(str(value)) # 画像のパスが有効であることを確認して返す def get_img_path(): if len(sys.argv) == 2: img_path = sys.argv[1] if os.path.exists(img_path): return img_path return None # 画像の基本情報を取得して返す def get_img_info(path): img = Image.open(path) imgInfo = { "size_h": img.size[1], "size_w": img.size[0], "mode": img.mode, "format": img.format } return imgInfo # 画像の色情報を二次元配列に保持する def collect_px_rgb(path): img = Image.open(path) rgb = list(img.getdata()) return rgb # 画像ファイルから色情報を収集しエリアを分割する # 補正済みの画像が与えられる def init_colorPx(): # rgb: 色RGB値 # time: 連続する出現回数 # ibv(index_begin_vertex): 連続出現開始の頂点座標[x, y] return {"rgb": None, "time": 0, "ibv": [0,0]} def get_shape_vertex(array_rgb, w, h): # 色情報 time = 1 colorPx = init_colorPx() colorInfo = [] # 同じ行内において、直前と同じ色であれば 0, 異なれば 1 last_px_color = None for y in range(0, h): for x in range(0, w): px_color = array_rgb[w*y + x] if last_px_color != px_color: # 前色の記録 colorPx["time"] = time colorInfo.append(colorPx) # 新色の連続開始 colorPx = init_colorPx() colorPx["rgb"] = px_color colorPx["ibv"] = [x, y] time = 1 #printnn(1) else: time += 1 #printnn(0) last_px_color = px_color #print('') # 前色の記録 colorPx["time"] = time colorInfo.append(colorPx) return colorInfo # プログラムが保持しているカラー情報を可視化 useVars = list('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') usingColors = [] def debug_getvar(rgb): rgbvar = None for uc in usingColors: if uc['rgb'] == rgb: rgbvar = uc['rgbvar'] if rgbvar == None: rgbvar = useVars[0] del(useVars[0]) usingColors.append({"rgb": rgb, "rgbvar": rgbvar}) if(len(useVars) <= 0): print('Err: color-overflow') return rgbvar def debug_render(colorInfo, w, h): # t=0番目は無視 t = 1 tc = colorInfo[t] for y in range(0, h): for x in range(0, w): if tc != None and (x == tc['ibv'][0] and y == tc['ibv'][1]): v = debug_getvar(tc['rgb']) printnn(v) t += 1 
if(t < len(colorInfo)): tc = colorInfo[t] else: tc = None else: printnn(v) print('') printnn('\n') # 使用されているカラー情報を出力 for color in usingColors: print("{}: rgba{}".format(color['rgbvar'], color['rgb'])) # 最大カラー変数名を出力 printnn('\n') print(len(usingColors) - 1) # Bug? if __name__ == '__main__': src = get_img_path() imginfo = get_img_info(src) if src != None: rgb_arr = collect_px_rgb(src) w = imginfo['size_w'] h = imginfo['size_h'] colorinfo = get_shape_vertex(rgb_arr, w, h) debug_render(colorinfo, w, h)
mit
slarosa/QGIS
python/plugins/sextante/outputs/OutputNumber.py
4
1337
# -*- coding: utf-8 -*- """ *************************************************************************** OutputNumber.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from sextante.outputs.Output import Output class OutputNumber(Output): def __init__(self, name="", description=""): self.name = name self.description = description self.value = None self.hidden = True
gpl-2.0
meihuanyu/rental
rental/middlewares.py
1
6464
#!/usr/bin/python # -*-coding:utf-8-*- import time import random from scrapy.downloadermiddlewares.retry import RetryMiddleware #代理ip,这是固定的导入 from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware import pymongo import logging import utils class TimeoutIp(RetryMiddleware): def __init__(self,ip=''): '''初始化数据库''' connection = pymongo.MongoClient('127.0.0.1',27017) db = connection["ipproxy"] self.collection = db["iwjw"] '''初始化数据 ''' self.ipnum = 0 self.ipps=list(self.collection.find({"save_time":{"$lt":str(time.time())}})) def process_request(self, request, spider): '''轮训ip 排排坐''' self.ipnum +=1 self.ipnum = self.ipnum%len(self.ipps) request.meta["proxy"] = '%s%s:%s' % ('http://',self.ipps[self.ipnum]['ip'],self.ipps[self.ipnum]['port']) #request.meta["proxy"] = "http://111.13.71.119:80" _iptext='%s%s' % ('切换ip:',request.meta["proxy"]) utils.log(_iptext) def process_response(self, request, response, spider): try: if request.meta['download_latency']>2: self.errotRepeat('IpErrNum','超时ip:',10) else: self.ipps[self.ipnum]['IpErrNum'] = 0 if response.status>400: self.errotRepeat('ResErrNum','请求400错误:',10) else: self.ipps[self.ipnum]['ResErrNum'] = 0 return response except Exception, e: logging.warning(e) return response def process_exception(self, request, exception, spider): if "User timeout" in str(exception): self.errotRepeat('userTimeout', 'User timeout错误:',10) elif "side: 61" in str(exception): self.errotRepeat('sideError', 'side: 61错误:',10) elif "OpenSSL.SSL.Error" in str(exception): self.errotRepeat('opensslError', 'openssl error:', 10) else: self.errotRepeat('accidentError', '意外的错误:', 10) _errtext='%s%s%s%s' % ('意外的错误',request.meta['proxy'],'--',request.url) logging.warning(_errtext) logging.warning(exception) '''返回ipps params: updateNum 更新save_time return 所有可用ip ''' def getIppsAndUpdatetime(self,updateNum): if updateNum: self.collection.update_one({"_id": self.ipps[updateNum]['_id']}, {"$set": { "save_time": str(time.time() + 86400), "areOk":0 }}) 
logging.warning('已废弃----ip:'+self.ipps[updateNum]['ip']) self.ipps = list(self.collection.find({"save_time": {"$lt": str(time.time())}})) return self.ipps def errotRepeat(self,errKey,errVal,_errnum): if errKey in self.ipps[self.ipnum]: self.ipps[self.ipnum][errKey] += 1 _errtext = '%s%s%s%s' % (errVal, self.ipps[self.ipnum]['ip'], '次数:', self.ipps[self.ipnum][errKey]) logging.warning(_errtext) else: self.ipps[self.ipnum][errKey] = 0 if self.ipps[self.ipnum][errKey] == _errnum: logging.warning(errVal+"已达10次开始重置---") self.ipps = self.getIppsAndUpdatetime(self.ipnum) def setTime(self): allip=self.collection.find() for ip in allip: self.collection.update_one({"_id": ip['_id']}, {"$set": { "save_time": str(time.time()), "areOk":1 }}) class RotateUserAgentMiddleware(UserAgentMiddleware): def __init__(self, user_agent=''): self.user_agent = user_agent def process_request(self, request, spider): ua = random.choice(self.user_agent_list) if ua: request.headers.setdefault('User-Agent', ua) # the default user_agent_list composes chrome,I E,firefox,Mozilla,opera,netscape # for more user agent strings,you can find it in http://www.useragentstring.com/pages/useragentstring.php user_agent_list = [ \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \ "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \ "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \ "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \ "Mozilla/5.0 (Windows NT 6.1; 
WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \ "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \ "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24" ]
mit
cchurch/ansible
lib/ansible/modules/cloud/vmware/vmware_guest_tools_upgrade.py
23
7065
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, Mike Klebolt <michael.klebolt@centurylink.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: vmware_guest_tools_upgrade short_description: Module to upgrade VMTools version_added: 2.8 description: - This module upgrades the VMware Tools on Windows and Linux guests. requirements: - "python >= 2.6" - PyVmomi notes: - In order to upgrade VMTools, please power on virtual machine before hand - either 'manually' or using module M(vmware_guest_powerstate). options: name: description: - Name of the virtual machine to work with. - This is required if C(uuid) or C(moid) is not supplied. type: str name_match: description: - If multiple virtual machines matching the name, use the first or last found. default: 'first' choices: ['first', 'last'] type: str uuid: description: - UUID of the instance to manage if known, this is VMware's unique identifier. - This is required if C(name) or C(moid) is not supplied. type: str moid: description: - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance. - This is required if C(name) or C(uuid) is not supplied. version_added: '2.9' type: str folder: description: - Destination folder, absolute or relative path to find an existing guest. - This is required, if C(name) is supplied. - The folder should include the datacenter. 
ESX's datacenter is ha-datacenter - 'Examples:' - ' folder: /ha-datacenter/vm' - ' folder: ha-datacenter/vm' - ' folder: /datacenter1/vm' - ' folder: datacenter1/vm' - ' folder: /datacenter1/vm/folder1' - ' folder: datacenter1/vm/folder1' - ' folder: /folder1/datacenter1/vm' - ' folder: folder1/datacenter1/vm' - ' folder: /folder1/datacenter1/vm/folder2' type: str datacenter: description: - Destination datacenter where the virtual machine exists. required: True type: str extends_documentation_fragment: vmware.documentation author: - Mike Klebolt (@MikeKlebolt) <michael.klebolt@centurylink.com> ''' EXAMPLES = ''' - name: Upgrade VMware Tools using uuid vmware_guest_tools_upgrade: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter_name }}" uuid: 421e4592-c069-924d-ce20-7e7533fab926 delegate_to: localhost - name: Upgrade VMware Tools using MoID vmware_guest_tools_upgrade: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter_name }}" moid: vm-42 delegate_to: localhost ''' RETURN = ''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task from ansible.module_utils._text import to_native class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) def upgrade_tools(self, vm): result = {'failed': False, 'changed': False, 'msg': ''} # Exit if VMware tools is already up to date if vm.guest.toolsStatus == "toolsOk": result.update( changed=False, msg="VMware tools is already up to date", ) return result # Fail if VM is not powered on elif vm.summary.runtime.powerState != "poweredOn": result.update( failed=True, msg="VM must be powered on to upgrade tools", ) return result # Fail if VMware tools is either not running or not installed elif vm.guest.toolsStatus in ["toolsNotRunning", 
"toolsNotInstalled"]: result.update( failed=True, msg="VMware tools is either not running or not installed", ) return result # If vmware tools is out of date, check major OS family # Upgrade tools on Linux and Windows guests elif vm.guest.toolsStatus == "toolsOld": try: if vm.guest.guestFamily in ["linuxGuest", "windowsGuest"]: task = vm.UpgradeTools() changed, err_msg = wait_for_task(task) result.update(changed=changed, msg=to_native(err_msg)) else: result.update(msg='Guest Operating System is other than Linux and Windows.') return result except Exception as exc: result.update( failed=True, msg='Error while upgrading VMware tools %s' % to_native(exc), ) return result else: result.update( failed=True, msg="VMware tools could not be upgraded", ) return result def main(): argument_spec = vmware_argument_spec() argument_spec.update( name=dict(type='str'), name_match=dict(type='str', choices=['first', 'last'], default='first'), uuid=dict(type='str'), moid=dict(type='str'), folder=dict(type='str'), datacenter=dict(type='str', required=True), ) module = AnsibleModule( argument_spec=argument_spec, required_one_of=[ ['name', 'uuid', 'moid'] ] ) if module.params['folder']: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified module.params['folder'] = module.params['folder'].rstrip('/') pyv = PyVmomiHelper(module) # Check if the VM exists before continuing vm = pyv.get_vm() # VM already exists if vm: try: result = pyv.upgrade_tools(vm) if result['changed']: module.exit_json(changed=result['changed']) elif result['failed']: module.fail_json(msg=result['msg']) else: module.exit_json(msg=result['msg'], changed=result['changed']) except Exception as exc: module.fail_json(msg='Unknown error: %s' % to_native(exc)) else: vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid') module.fail_json(msg='Unable to find VM %s' % vm_id) if __name__ == '__main__': main()
gpl-3.0
librosa/librosa
librosa/util/utils.py
1
64787
#!/usr/bin/env python # -*- coding: utf-8 -*- """Utility functions""" import warnings import scipy.ndimage import scipy.sparse import numpy as np import numba from numpy.lib.stride_tricks import as_strided from .._cache import cache from .exceptions import ParameterError # Constrain STFT block sizes to 256 KB MAX_MEM_BLOCK = 2 ** 8 * 2 ** 10 __all__ = [ "MAX_MEM_BLOCK", "frame", "pad_center", "fix_length", "valid_audio", "valid_int", "valid_intervals", "fix_frames", "axis_sort", "localmax", "localmin", "normalize", "peak_pick", "sparsify_rows", "shear", "stack", "fill_off_diagonal", "index_to_slice", "sync", "softmask", "buf_to_float", "tiny", "cyclic_gradient", "dtype_r2c", "dtype_c2r", ] def frame(x, frame_length, hop_length, axis=-1): """Slice a data array into (overlapping) frames. This implementation uses low-level stride manipulation to avoid making a copy of the data. The resulting frame representation is a new view of the same input data. However, if the input data is not contiguous in memory, a warning will be issued and the output will be a full copy, rather than a view of the input data. For example, a one-dimensional input ``x = [0, 1, 2, 3, 4, 5, 6]`` can be framed with frame length 3 and hop length 2 in two ways. The first (``axis=-1``), results in the array ``x_frames``:: [[0, 2, 4], [1, 3, 5], [2, 4, 6]] where each column ``x_frames[:, i]`` contains a contiguous slice of the input ``x[i * hop_length : i * hop_length + frame_length]``. The second way (``axis=0``) results in the array ``x_frames``:: [[0, 1, 2], [2, 3, 4], [4, 5, 6]] where each row ``x_frames[i]`` contains a contiguous slice of the input. This generalizes to higher dimensional inputs, as shown in the examples below. In general, the framing operation increments by 1 the number of dimensions, adding a new "frame axis" either to the end of the array (``axis=-1``) or the beginning of the array (``axis=0``). 
Parameters ---------- x : np.ndarray Array to frame frame_length : int > 0 [scalar] Length of the frame hop_length : int > 0 [scalar] Number of steps to advance between frames axis : 0 or -1 The axis along which to frame. If ``axis=-1`` (the default), then ``x`` is framed along its last dimension. ``x`` must be "F-contiguous" in this case. If ``axis=0``, then ``x`` is framed along its first dimension. ``x`` must be "C-contiguous" in this case. Returns ------- x_frames : np.ndarray [shape=(..., frame_length, N_FRAMES) or (N_FRAMES, frame_length, ...)] A framed view of ``x``, for example with ``axis=-1`` (framing on the last dimension):: x_frames[..., j] == x[..., j * hop_length : j * hop_length + frame_length] If ``axis=0`` (framing on the first dimension), then:: x_frames[j] = x[j * hop_length : j * hop_length + frame_length] Raises ------ ParameterError If ``x`` is not an `np.ndarray`. If ``x.shape[axis] < frame_length``, there is not enough data to fill one frame. If ``hop_length < 1``, frames cannot advance. If ``axis`` is not 0 or -1. Framing is only supported along the first or last axis. See Also -------- numpy.asfortranarray : Convert data to F-contiguous representation numpy.ascontiguousarray : Convert data to C-contiguous representation numpy.ndarray.flags : information about the memory layout of a numpy `ndarray`. 
Examples -------- Extract 2048-sample frames from monophonic signal with a hop of 64 samples per frame >>> y, sr = librosa.load(librosa.ex('trumpet')) >>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64) >>> frames array([[-1.407e-03, -2.604e-02, ..., -1.795e-05, -8.108e-06], [-4.461e-04, -3.721e-02, ..., -1.573e-05, -1.652e-05], ..., [ 7.960e-02, -2.335e-01, ..., -6.815e-06, 1.266e-05], [ 9.568e-02, -1.252e-01, ..., 7.397e-06, -1.921e-05]], dtype=float32) >>> y.shape (117601,) >>> frames.shape (2048, 1806) Or frame along the first axis instead of the last: >>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64, axis=0) >>> frames.shape (1806, 2048) Frame a stereo signal: >>> y, sr = librosa.load(librosa.ex('trumpet', hq=True), mono=False) >>> y.shape (2, 117601) >>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64) (2, 2048, 1806) Carve an STFT into fixed-length patches of 32 frames with 50% overlap >>> y, sr = librosa.load(librosa.ex('trumpet')) >>> S = np.abs(librosa.stft(y)) >>> S.shape (1025, 230) >>> S_patch = librosa.util.frame(S, frame_length=32, hop_length=16) >>> S_patch.shape (1025, 32, 13) >>> # The first patch contains the first 32 frames of S >>> np.allclose(S_patch[:, :, 0], S[:, :32]) True >>> # The second patch contains frames 16 to 16+32=48, and so on >>> np.allclose(S_patch[:, :, 1], S[:, 16:48]) True """ if not isinstance(x, np.ndarray): raise ParameterError( "Input must be of type numpy.ndarray, " "given type(x)={}".format(type(x)) ) if x.shape[axis] < frame_length: raise ParameterError( "Input is too short (n={:d})" " for frame_length={:d}".format(x.shape[axis], frame_length) ) if hop_length < 1: raise ParameterError("Invalid hop_length: {:d}".format(hop_length)) if axis == -1 and not x.flags["F_CONTIGUOUS"]: warnings.warn( "librosa.util.frame called with axis={} " "on a non-contiguous input. 
This will result in a copy.".format(axis) ) x = np.asfortranarray(x) elif axis == 0 and not x.flags["C_CONTIGUOUS"]: warnings.warn( "librosa.util.frame called with axis={} " "on a non-contiguous input. This will result in a copy.".format(axis) ) x = np.ascontiguousarray(x) n_frames = 1 + (x.shape[axis] - frame_length) // hop_length strides = np.asarray(x.strides) new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize if axis == -1: shape = list(x.shape)[:-1] + [frame_length, n_frames] strides = list(strides) + [hop_length * new_stride] elif axis == 0: shape = [n_frames, frame_length] + list(x.shape)[1:] strides = [hop_length * new_stride] + list(strides) else: raise ParameterError("Frame axis={} must be either 0 or -1".format(axis)) return as_strided(x, shape=shape, strides=strides) @cache(level=20) def valid_audio(y, mono=True): """Determine whether a variable contains valid audio data. If ``mono=True``, then ``y`` is only considered valid if it has shape ``(N,)`` (number of samples). If ``mono=False``, then ``y`` may be either monophonic, or have shape ``(2, N)`` (stereo) or ``(K, N)`` for ``K>=2`` for general multi-channel. Parameters ---------- y : np.ndarray The input data to validate mono : bool Whether or not to require monophonic audio Returns ------- valid : bool True if all tests pass Raises ------ ParameterError In any of these cases: - ``type(y)`` is not ``np.ndarray`` - ``y.dtype`` is not floating-point - ``mono == True`` and ``y.ndim`` is not 1 - ``mono == False`` and ``y.ndim`` is not 1 or 2 - ``mono == False`` and ``y.ndim == 2`` but ``y.shape[0] == 1`` - ``np.isfinite(y).all()`` is False Notes ----- This function caches at level 20. 
Examples -------- >>> # By default, valid_audio allows only mono signals >>> filepath = librosa.ex('trumpet', hq=True) >>> y_mono, sr = librosa.load(filepath, mono=True) >>> y_stereo, _ = librosa.load(filepath, mono=False) >>> librosa.util.valid_audio(y_mono), librosa.util.valid_audio(y_stereo) True, False >>> # To allow stereo signals, set mono=False >>> librosa.util.valid_audio(y_stereo, mono=False) True See also -------- numpy.float32 """ if not isinstance(y, np.ndarray): raise ParameterError("Audio data must be of type numpy.ndarray") if not np.issubdtype(y.dtype, np.floating): raise ParameterError("Audio data must be floating-point") if mono and y.ndim != 1: raise ParameterError( "Invalid shape for monophonic audio: " "ndim={:d}, shape={}".format(y.ndim, y.shape) ) elif y.ndim > 2 or y.ndim == 0: raise ParameterError( "Audio data must have shape (samples,) or (channels, samples). " "Received shape={}".format(y.shape) ) elif y.ndim == 2 and y.shape[0] < 2: raise ParameterError( "Mono data must have shape (samples,). " "Received shape={}".format(y.shape) ) if not np.isfinite(y).all(): raise ParameterError("Audio buffer is not finite everywhere") return True def valid_int(x, cast=None): """Ensure that an input value is integer-typed. This is primarily useful for ensuring integrable-valued array indices. Parameters ---------- x : number A scalar value to be cast to int cast : function [optional] A function to modify ``x`` before casting. Default: `np.floor` Returns ------- x_int : int ``x_int = int(cast(x))`` Raises ------ ParameterError If ``cast`` is provided and is not callable. 
""" if cast is None: cast = np.floor if not callable(cast): raise ParameterError("cast parameter must be callable") return int(cast(x)) def valid_intervals(intervals): """Ensure that an array is a valid representation of time intervals: - intervals.ndim == 2 - intervals.shape[1] == 2 - intervals[i, 0] <= intervals[i, 1] for all i Parameters ---------- intervals : np.ndarray [shape=(n, 2)] set of time intervals Returns ------- valid : bool True if ``intervals`` passes validation. """ if intervals.ndim != 2 or intervals.shape[-1] != 2: raise ParameterError("intervals must have shape (n, 2)") if np.any(intervals[:, 0] > intervals[:, 1]): raise ParameterError( "intervals={} must have non-negative durations".format(intervals) ) return True def pad_center(data, size, axis=-1, **kwargs): """Pad an array to a target length along a target axis. This differs from `np.pad` by centering the data prior to padding, analogous to `str.center` Examples -------- >>> # Generate a vector >>> data = np.ones(5) >>> librosa.util.pad_center(data, 10, mode='constant') array([ 0., 0., 1., 1., 1., 1., 1., 0., 0., 0.]) >>> # Pad a matrix along its first dimension >>> data = np.ones((3, 5)) >>> librosa.util.pad_center(data, 7, axis=0) array([[ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.], [ 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1.], [ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> # Or its second dimension >>> librosa.util.pad_center(data, 7, axis=1) array([[ 0., 1., 1., 1., 1., 1., 0.], [ 0., 1., 1., 1., 1., 1., 0.], [ 0., 1., 1., 1., 1., 1., 0.]]) Parameters ---------- data : np.ndarray Vector to be padded and centered size : int >= len(data) [scalar] Length to pad ``data`` axis : int Axis along which to pad and center the data kwargs : additional keyword arguments arguments passed to `np.pad` Returns ------- data_padded : np.ndarray ``data`` centered and padded to length ``size`` along the specified axis Raises ------ ParameterError If ``size < data.shape[axis]`` See 
Also -------- numpy.pad """ kwargs.setdefault("mode", "constant") n = data.shape[axis] lpad = int((size - n) // 2) lengths = [(0, 0)] * data.ndim lengths[axis] = (lpad, int(size - n - lpad)) if lpad < 0: raise ParameterError( ("Target size ({:d}) must be " "at least input size ({:d})").format(size, n) ) return np.pad(data, lengths, **kwargs) def fix_length(data, size, axis=-1, **kwargs): """Fix the length an array ``data`` to exactly ``size`` along a target axis. If ``data.shape[axis] < n``, pad according to the provided kwargs. By default, ``data`` is padded with trailing zeros. Examples -------- >>> y = np.arange(7) >>> # Default: pad with zeros >>> librosa.util.fix_length(y, 10) array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0]) >>> # Trim to a desired length >>> librosa.util.fix_length(y, 5) array([0, 1, 2, 3, 4]) >>> # Use edge-padding instead of zeros >>> librosa.util.fix_length(y, 10, mode='edge') array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6]) Parameters ---------- data : np.ndarray array to be length-adjusted size : int >= 0 [scalar] desired length of the array axis : int, <= data.ndim axis along which to fix length kwargs : additional keyword arguments Parameters to ``np.pad`` Returns ------- data_fixed : np.ndarray [shape=data.shape] ``data`` either trimmed or padded to length ``size`` along the specified axis. 
See Also -------- numpy.pad """ kwargs.setdefault("mode", "constant") n = data.shape[axis] if n > size: slices = [slice(None)] * data.ndim slices[axis] = slice(0, size) return data[tuple(slices)] elif n < size: lengths = [(0, 0)] * data.ndim lengths[axis] = (0, size - n) return np.pad(data, lengths, **kwargs) return data def fix_frames(frames, x_min=0, x_max=None, pad=True): """Fix a list of frames to lie within [x_min, x_max] Examples -------- >>> # Generate a list of frame indices >>> frames = np.arange(0, 1000.0, 50) >>> frames array([ 0., 50., 100., 150., 200., 250., 300., 350., 400., 450., 500., 550., 600., 650., 700., 750., 800., 850., 900., 950.]) >>> # Clip to span at most 250 >>> librosa.util.fix_frames(frames, x_max=250) array([ 0, 50, 100, 150, 200, 250]) >>> # Or pad to span up to 2500 >>> librosa.util.fix_frames(frames, x_max=2500) array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 2500]) >>> librosa.util.fix_frames(frames, x_max=2500, pad=False) array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950]) >>> # Or starting away from zero >>> frames = np.arange(200, 500, 33) >>> frames array([200, 233, 266, 299, 332, 365, 398, 431, 464, 497]) >>> librosa.util.fix_frames(frames) array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497]) >>> librosa.util.fix_frames(frames, x_max=500) array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497, 500]) Parameters ---------- frames : np.ndarray [shape=(n_frames,)] List of non-negative frame indices x_min : int >= 0 or None Minimum allowed frame index x_max : int >= 0 or None Maximum allowed frame index pad : boolean If ``True``, then ``frames`` is expanded to span the full range ``[x_min, x_max]`` Returns ------- fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int] Fixed frame indices, flattened and sorted Raises ------ ParameterError If ``frames`` contains negative values """ frames = 
np.asarray(frames) if np.any(frames < 0): raise ParameterError("Negative frame index detected") if pad and (x_min is not None or x_max is not None): frames = np.clip(frames, x_min, x_max) if pad: pad_data = [] if x_min is not None: pad_data.append(x_min) if x_max is not None: pad_data.append(x_max) frames = np.concatenate((pad_data, frames)) if x_min is not None: frames = frames[frames >= x_min] if x_max is not None: frames = frames[frames <= x_max] return np.unique(frames).astype(int) def axis_sort(S, axis=-1, index=False, value=None): """Sort an array along its rows or columns. Examples -------- Visualize NMF output for a spectrogram S >>> # Sort the columns of W by peak frequency bin >>> y, sr = librosa.load(librosa.ex('trumpet')) >>> S = np.abs(librosa.stft(y)) >>> W, H = librosa.decompose.decompose(S, n_components=64) >>> W_sort = librosa.util.axis_sort(W) Or sort by the lowest frequency bin >>> W_sort = librosa.util.axis_sort(W, value=np.argmin) Or sort the rows instead of the columns >>> W_sort_rows = librosa.util.axis_sort(W, axis=0) Get the sorting index also, and use it to permute the rows of H >>> W_sort, idx = librosa.util.axis_sort(W, index=True) >>> H_sort = H[idx, :] >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(nrows=2, ncols=2) >>> img_w = librosa.display.specshow(librosa.amplitude_to_db(W, ref=np.max), ... y_axis='log', ax=ax[0, 0]) >>> ax[0, 0].set(title='W') >>> ax[0, 0].label_outer() >>> img_act = librosa.display.specshow(H, x_axis='time', ax=ax[0, 1]) >>> ax[0, 1].set(title='H') >>> ax[0, 1].label_outer() >>> librosa.display.specshow(librosa.amplitude_to_db(W_sort, ... ref=np.max), ... 
y_axis='log', ax=ax[1, 0]) >>> ax[1, 0].set(title='W sorted') >>> librosa.display.specshow(H_sort, x_axis='time', ax=ax[1, 1]) >>> ax[1, 1].set(title='H sorted') >>> ax[1, 1].label_outer() >>> fig.colorbar(img_w, ax=ax[:, 0], orientation='horizontal') >>> fig.colorbar(img_act, ax=ax[:, 1], orientation='horizontal') Parameters ---------- S : np.ndarray [shape=(d, n)] Array to be sorted axis : int [scalar] The axis along which to compute the sorting values - ``axis=0`` to sort rows by peak column index - ``axis=1`` to sort columns by peak row index index : boolean [scalar] If true, returns the index array as well as the permuted data. value : function function to return the index corresponding to the sort order. Default: `np.argmax`. Returns ------- S_sort : np.ndarray [shape=(d, n)] ``S`` with the columns or rows permuted in sorting order idx : np.ndarray (optional) [shape=(d,) or (n,)] If ``index == True``, the sorting index used to permute ``S``. Length of ``idx`` corresponds to the selected ``axis``. Raises ------ ParameterError If ``S`` does not have exactly 2 dimensions (``S.ndim != 2``) """ if value is None: value = np.argmax if S.ndim != 2: raise ParameterError("axis_sort is only defined for 2D arrays") bin_idx = value(S, axis=np.mod(1 - axis, S.ndim)) idx = np.argsort(bin_idx) sort_slice = [slice(None)] * S.ndim sort_slice[axis] = idx if index: return S[tuple(sort_slice)], idx else: return S[tuple(sort_slice)] @cache(level=40) def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None): """Normalize an array along a chosen axis. Given a norm (described below) and a target axis, the input array is scaled so that:: norm(S, axis=axis) == 1 For example, ``axis=0`` normalizes each column of a 2-d array by aggregating over the rows (0-axis). Similarly, ``axis=1`` normalizes each row of a 2-d array. 
This function also supports thresholding small-norm slices: any slice (i.e., row or column) with norm below a specified ``threshold`` can be left un-normalized, set to all-zeros, or filled with uniform non-zero values that normalize to 1. Note: the semantics of this function differ from `scipy.linalg.norm` in two ways: multi-dimensional arrays are supported, but matrix-norms are not. Parameters ---------- S : np.ndarray The matrix to normalize norm : {np.inf, -np.inf, 0, float > 0, None} - `np.inf` : maximum absolute value - `-np.inf` : minimum absolute value - `0` : number of non-zeros (the support) - float : corresponding l_p norm See `scipy.linalg.norm` for details. - None : no normalization is performed axis : int [scalar] Axis along which to compute the norm. threshold : number > 0 [optional] Only the columns (or rows) with norm at least ``threshold`` are normalized. By default, the threshold is determined from the numerical precision of ``S.dtype``. fill : None or bool If None, then columns (or rows) with norm below ``threshold`` are left as is. If False, then columns (rows) with norm below ``threshold`` are set to 0. If True, then columns (rows) with norm below ``threshold`` are filled uniformly such that the corresponding norm is 1. .. note:: ``fill=True`` is incompatible with ``norm=0`` because no uniform vector exists with l0 "norm" equal to 1. Returns ------- S_norm : np.ndarray [shape=S.shape] Normalized array Raises ------ ParameterError If ``norm`` is not among the valid types defined above If ``S`` is not finite If ``fill=True`` and ``norm=0`` See Also -------- scipy.linalg.norm Notes ----- This function caches at level 40. Examples -------- >>> # Construct an example matrix >>> S = np.vander(np.arange(-2.0, 2.0)) >>> S array([[-8., 4., -2., 1.], [-1., 1., -1., 1.], [ 0., 0., 0., 1.], [ 1., 1., 1., 1.]]) >>> # Max (l-infinity)-normalize the columns >>> librosa.util.normalize(S) array([[-1. , 1. , -1. , 1. ], [-0.125, 0.25 , -0.5 , 1. ], [ 0. , 0. 
, 0. , 1. ], [ 0.125, 0.25 , 0.5 , 1. ]]) >>> # Max (l-infinity)-normalize the rows >>> librosa.util.normalize(S, axis=1) array([[-1. , 0.5 , -0.25 , 0.125], [-1. , 1. , -1. , 1. ], [ 0. , 0. , 0. , 1. ], [ 1. , 1. , 1. , 1. ]]) >>> # l1-normalize the columns >>> librosa.util.normalize(S, norm=1) array([[-0.8 , 0.667, -0.5 , 0.25 ], [-0.1 , 0.167, -0.25 , 0.25 ], [ 0. , 0. , 0. , 0.25 ], [ 0.1 , 0.167, 0.25 , 0.25 ]]) >>> # l2-normalize the columns >>> librosa.util.normalize(S, norm=2) array([[-0.985, 0.943, -0.816, 0.5 ], [-0.123, 0.236, -0.408, 0.5 ], [ 0. , 0. , 0. , 0.5 ], [ 0.123, 0.236, 0.408, 0.5 ]]) >>> # Thresholding and filling >>> S[:, -1] = 1e-308 >>> S array([[ -8.000e+000, 4.000e+000, -2.000e+000, 1.000e-308], [ -1.000e+000, 1.000e+000, -1.000e+000, 1.000e-308], [ 0.000e+000, 0.000e+000, 0.000e+000, 1.000e-308], [ 1.000e+000, 1.000e+000, 1.000e+000, 1.000e-308]]) >>> # By default, small-norm columns are left untouched >>> librosa.util.normalize(S) array([[ -1.000e+000, 1.000e+000, -1.000e+000, 1.000e-308], [ -1.250e-001, 2.500e-001, -5.000e-001, 1.000e-308], [ 0.000e+000, 0.000e+000, 0.000e+000, 1.000e-308], [ 1.250e-001, 2.500e-001, 5.000e-001, 1.000e-308]]) >>> # Small-norm columns can be zeroed out >>> librosa.util.normalize(S, fill=False) array([[-1. , 1. , -1. , 0. ], [-0.125, 0.25 , -0.5 , 0. ], [ 0. , 0. , 0. , 0. ], [ 0.125, 0.25 , 0.5 , 0. ]]) >>> # Or set to constant with unit-norm >>> librosa.util.normalize(S, fill=True) array([[-1. , 1. , -1. , 1. ], [-0.125, 0.25 , -0.5 , 1. ], [ 0. , 0. , 0. , 1. ], [ 0.125, 0.25 , 0.5 , 1. ]]) >>> # With an l1 norm instead of max-norm >>> librosa.util.normalize(S, norm=1, fill=True) array([[-0.8 , 0.667, -0.5 , 0.25 ], [-0.1 , 0.167, -0.25 , 0.25 ], [ 0. , 0. , 0. 
, 0.25 ], [ 0.1 , 0.167, 0.25 , 0.25 ]]) """ # Avoid div-by-zero if threshold is None: threshold = tiny(S) elif threshold <= 0: raise ParameterError( "threshold={} must be strictly " "positive".format(threshold) ) if fill not in [None, False, True]: raise ParameterError("fill={} must be None or boolean".format(fill)) if not np.all(np.isfinite(S)): raise ParameterError("Input must be finite") # All norms only depend on magnitude, let's do that first mag = np.abs(S).astype(np.float) # For max/min norms, filling with 1 works fill_norm = 1 if norm == np.inf: length = np.max(mag, axis=axis, keepdims=True) elif norm == -np.inf: length = np.min(mag, axis=axis, keepdims=True) elif norm == 0: if fill is True: raise ParameterError("Cannot normalize with norm=0 and fill=True") length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype) elif np.issubdtype(type(norm), np.number) and norm > 0: length = np.sum(mag ** norm, axis=axis, keepdims=True) ** (1.0 / norm) if axis is None: fill_norm = mag.size ** (-1.0 / norm) else: fill_norm = mag.shape[axis] ** (-1.0 / norm) elif norm is None: return S else: raise ParameterError("Unsupported norm: {}".format(repr(norm))) # indices where norm is below the threshold small_idx = length < threshold Snorm = np.empty_like(S) if fill is None: # Leave small indices un-normalized length[small_idx] = 1.0 Snorm[:] = S / length elif fill: # If we have a non-zero fill value, we locate those entries by # doing a nan-divide. # If S was finite, then length is finite (except for small positions) length[small_idx] = np.nan Snorm[:] = S / length Snorm[np.isnan(Snorm)] = fill_norm else: # Set small values to zero by doing an inf-divide. # This is safe (by IEEE-754) as long as S is finite. 
length[small_idx] = np.inf Snorm[:] = S / length return Snorm def localmax(x, axis=0): """Find local maxima in an array An element ``x[i]`` is considered a local maximum if the following conditions are met: - ``x[i] > x[i-1]`` - ``x[i] >= x[i+1]`` Note that the first condition is strict, and that the first element ``x[0]`` will never be considered as a local maximum. Examples -------- >>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1]) >>> librosa.util.localmax(x) array([False, False, False, True, False, True, False, True], dtype=bool) >>> # Two-dimensional example >>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]]) >>> librosa.util.localmax(x, axis=0) array([[False, False, False], [ True, False, False], [False, True, True]], dtype=bool) >>> librosa.util.localmax(x, axis=1) array([[False, False, True], [False, False, True], [False, False, True]], dtype=bool) Parameters ---------- x : np.ndarray [shape=(d1,d2,...)] input vector or array axis : int axis along which to compute local maximality Returns ------- m : np.ndarray [shape=x.shape, dtype=bool] indicator array of local maximality along ``axis`` See Also -------- localmin """ paddings = [(0, 0)] * x.ndim paddings[axis] = (1, 1) x_pad = np.pad(x, paddings, mode="edge") inds1 = [slice(None)] * x.ndim inds1[axis] = slice(0, -2) inds2 = [slice(None)] * x.ndim inds2[axis] = slice(2, x_pad.shape[axis]) return (x > x_pad[tuple(inds1)]) & (x >= x_pad[tuple(inds2)]) def localmin(x, axis=0): """Find local minima in an array An element ``x[i]`` is considered a local minimum if the following conditions are met: - ``x[i] < x[i-1]`` - ``x[i] <= x[i+1]`` Note that the first condition is strict, and that the first element ``x[0]`` will never be considered as a local minimum. 
Examples -------- >>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1]) >>> librosa.util.localmin(x) array([False, True, False, False, True, False, True, False]) >>> # Two-dimensional example >>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]]) >>> librosa.util.localmin(x, axis=0) array([[False, False, False], [False, True, True], [False, False, False]]) >>> librosa.util.localmin(x, axis=1) array([[False, True, False], [False, True, False], [False, True, False]]) Parameters ---------- x : np.ndarray [shape=(d1,d2,...)] input vector or array axis : int axis along which to compute local minimality Returns ------- m : np.ndarray [shape=x.shape, dtype=bool] indicator array of local minimality along ``axis`` See Also -------- localmax """ paddings = [(0, 0)] * x.ndim paddings[axis] = (1, 1) x_pad = np.pad(x, paddings, mode="edge") inds1 = [slice(None)] * x.ndim inds1[axis] = slice(0, -2) inds2 = [slice(None)] * x.ndim inds2[axis] = slice(2, x_pad.shape[axis]) return (x < x_pad[tuple(inds1)]) & (x <= x_pad[tuple(inds2)]) def peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait): """Uses a flexible heuristic to pick peaks in a signal. A sample n is selected as an peak if the corresponding ``x[n]`` fulfills the following three conditions: 1. ``x[n] == max(x[n - pre_max:n + post_max])`` 2. ``x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta`` 3. ``n - previous_n > wait`` where ``previous_n`` is the last sample picked as a peak (greedily). This implementation is based on [#]_ and [#]_. .. [#] Boeck, Sebastian, Florian Krebs, and Markus Schedl. "Evaluating the Online Capabilities of Onset Detection Methods." ISMIR. 2012. .. 
[#] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py Parameters ---------- x : np.ndarray [shape=(n,)] input signal to peak picks from pre_max : int >= 0 [scalar] number of samples before ``n`` over which max is computed post_max : int >= 1 [scalar] number of samples after ``n`` over which max is computed pre_avg : int >= 0 [scalar] number of samples before ``n`` over which mean is computed post_avg : int >= 1 [scalar] number of samples after ``n`` over which mean is computed delta : float >= 0 [scalar] threshold offset for mean wait : int >= 0 [scalar] number of samples to wait after picking a peak Returns ------- peaks : np.ndarray [shape=(n_peaks,), dtype=int] indices of peaks in ``x`` Raises ------ ParameterError If any input lies outside its defined range Examples -------- >>> y, sr = librosa.load(librosa.ex('trumpet')) >>> onset_env = librosa.onset.onset_strength(y=y, sr=sr, ... hop_length=512, ... aggregate=np.median) >>> peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10) >>> peaks array([ 3, 27, 40, 61, 72, 88, 103]) >>> import matplotlib.pyplot as plt >>> times = librosa.times_like(onset_env, sr=sr, hop_length=512) >>> fig, ax = plt.subplots(nrows=2, sharex=True) >>> D = np.abs(librosa.stft(y)) >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max), ... y_axis='log', x_axis='time', ax=ax[1]) >>> ax[0].plot(times, onset_env, alpha=0.8, label='Onset strength') >>> ax[0].vlines(times[peaks], 0, ... onset_env.max(), color='r', alpha=0.8, ... 
label='Selected peaks') >>> ax[0].legend(frameon=True, framealpha=0.8) >>> ax[0].label_outer() """ if pre_max < 0: raise ParameterError("pre_max must be non-negative") if pre_avg < 0: raise ParameterError("pre_avg must be non-negative") if delta < 0: raise ParameterError("delta must be non-negative") if wait < 0: raise ParameterError("wait must be non-negative") if post_max <= 0: raise ParameterError("post_max must be positive") if post_avg <= 0: raise ParameterError("post_avg must be positive") if x.ndim != 1: raise ParameterError("input array must be one-dimensional") # Ensure valid index types pre_max = valid_int(pre_max, cast=np.ceil) post_max = valid_int(post_max, cast=np.ceil) pre_avg = valid_int(pre_avg, cast=np.ceil) post_avg = valid_int(post_avg, cast=np.ceil) wait = valid_int(wait, cast=np.ceil) # Get the maximum of the signal over a sliding window max_length = pre_max + post_max max_origin = np.ceil(0.5 * (pre_max - post_max)) # Using mode='constant' and cval=x.min() effectively truncates # the sliding window at the boundaries mov_max = scipy.ndimage.filters.maximum_filter1d( x, int(max_length), mode="constant", origin=int(max_origin), cval=x.min() ) # Get the mean of the signal over a sliding window avg_length = pre_avg + post_avg avg_origin = np.ceil(0.5 * (pre_avg - post_avg)) # Here, there is no mode which results in the behavior we want, # so we'll correct below. 
mov_avg = scipy.ndimage.filters.uniform_filter1d( x, int(avg_length), mode="nearest", origin=int(avg_origin) ) # Correct sliding average at the beginning n = 0 # Only need to correct in the range where the window needs to be truncated while n - pre_avg < 0 and n < x.shape[0]: # This just explicitly does mean(x[n - pre_avg:n + post_avg]) # with truncation start = n - pre_avg start = start if start > 0 else 0 mov_avg[n] = np.mean(x[start : n + post_avg]) n += 1 # Correct sliding average at the end n = x.shape[0] - post_avg # When post_avg > x.shape[0] (weird case), reset to 0 n = n if n > 0 else 0 while n < x.shape[0]: start = n - pre_avg start = start if start > 0 else 0 mov_avg[n] = np.mean(x[start : n + post_avg]) n += 1 # First mask out all entries not equal to the local max detections = x * (x == mov_max) # Then mask out all entries less than the thresholded average detections = detections * (detections >= (mov_avg + delta)) # Initialize peaks array, to be filled greedily peaks = [] # Remove onsets which are close together in time last_onset = -np.inf for i in np.nonzero(detections)[0]: # Only report an onset if the "wait" samples was reported if i > last_onset + wait: peaks.append(i) # Save last reported onset last_onset = i return np.array(peaks) @cache(level=40) def sparsify_rows(x, quantile=0.01, dtype=None): """Return a row-sparse matrix approximating the input Parameters ---------- x : np.ndarray [ndim <= 2] The input matrix to sparsify. quantile : float in [0, 1.0) Percentage of magnitude to discard in each row of ``x`` dtype : np.dtype, optional The dtype of the output array. If not provided, then ``x.dtype`` will be used. Returns ------- x_sparse : ``scipy.sparse.csr_matrix`` [shape=x.shape] Row-sparsified approximation of ``x`` If ``x.ndim == 1``, then ``x`` is interpreted as a row vector, and ``x_sparse.shape == (1, len(x))``. 
Raises ------ ParameterError If ``x.ndim > 2`` If ``quantile`` lies outside ``[0, 1.0)`` Notes ----- This function caches at level 40. Examples -------- >>> # Construct a Hann window to sparsify >>> x = scipy.signal.hann(32) >>> x array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156, 0.09 , 0.041, 0.01 , 0. ]) >>> # Discard the bottom percentile >>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01) >>> x_sparse <1x32 sparse matrix of type '<type 'numpy.float64'>' with 26 stored elements in Compressed Sparse Row format> >>> x_sparse.todense() matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156, 0.09 , 0. , 0. , 0. ]]) >>> # Discard up to the bottom 10th percentile >>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1) >>> x_sparse <1x32 sparse matrix of type '<type 'numpy.float64'>' with 20 stored elements in Compressed Sparse Row format> >>> x_sparse.todense() matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. , 0. , 0. , 0. , 0. ]]) """ if x.ndim == 1: x = x.reshape((1, -1)) elif x.ndim > 2: raise ParameterError( "Input must have 2 or fewer dimensions. 
" "Provided x.shape={}.".format(x.shape) ) if not 0.0 <= quantile < 1: raise ParameterError("Invalid quantile {:.2f}".format(quantile)) if dtype is None: dtype = x.dtype x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=dtype) mags = np.abs(x) norms = np.sum(mags, axis=1, keepdims=True) mag_sort = np.sort(mags, axis=1) cumulative_mag = np.cumsum(mag_sort / norms, axis=1) threshold_idx = np.argmin(cumulative_mag < quantile, axis=1) for i, j in enumerate(threshold_idx): idx = np.where(mags[i] >= mag_sort[i, j]) x_sparse[i, idx] = x[i, idx] return x_sparse.tocsr() def buf_to_float(x, n_bytes=2, dtype=np.float32): """Convert an integer buffer to floating point values. This is primarily useful when loading integer-valued wav data into numpy arrays. Parameters ---------- x : np.ndarray [dtype=int] The integer-valued data buffer n_bytes : int [1, 2, 4] The number of bytes per sample in ``x`` dtype : numeric type The target output type (default: 32-bit float) Returns ------- x_float : np.ndarray [dtype=float] The input data buffer cast to floating point """ # Invert the scale of the data scale = 1.0 / float(1 << ((8 * n_bytes) - 1)) # Construct the format string fmt = "<i{:d}".format(n_bytes) # Rescale and format the data buffer return scale * np.frombuffer(x, fmt).astype(dtype) def index_to_slice(idx, idx_min=None, idx_max=None, step=None, pad=True): """Generate a slice array from an index array. Parameters ---------- idx : list-like Array of index boundaries idx_min, idx_max : None or int Minimum and maximum allowed indices step : None or int Step size for each slice. If `None`, then the default step of 1 is used. pad : boolean If `True`, pad ``idx`` to span the range ``idx_min:idx_max``. Returns ------- slices : list of slice ``slices[i] = slice(idx[i], idx[i+1], step)`` Additional slice objects may be added at the beginning or end, depending on whether ``pad==True`` and the supplied values for ``idx_min`` and ``idx_max``. 
See Also -------- fix_frames Examples -------- >>> # Generate slices from spaced indices >>> librosa.util.index_to_slice(np.arange(20, 100, 15)) [slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None), slice(80, 95, None)] >>> # Pad to span the range (0, 100) >>> librosa.util.index_to_slice(np.arange(20, 100, 15), ... idx_min=0, idx_max=100) [slice(0, 20, None), slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None), slice(80, 95, None), slice(95, 100, None)] >>> # Use a step of 5 for each slice >>> librosa.util.index_to_slice(np.arange(20, 100, 15), ... idx_min=0, idx_max=100, step=5) [slice(0, 20, 5), slice(20, 35, 5), slice(35, 50, 5), slice(50, 65, 5), slice(65, 80, 5), slice(80, 95, 5), slice(95, 100, 5)] """ # First, normalize the index set idx_fixed = fix_frames(idx, idx_min, idx_max, pad=pad) # Now convert the indices to slices return [slice(start, end, step) for (start, end) in zip(idx_fixed, idx_fixed[1:])] @cache(level=40) def sync(data, idx, aggregate=None, pad=True, axis=-1): """Synchronous aggregation of a multi-dimensional array between boundaries .. note:: In order to ensure total coverage, boundary points may be added to ``idx``. If synchronizing a feature matrix against beat tracker output, ensure that frame index numbers are properly aligned and use the same hop length. Parameters ---------- data : np.ndarray multi-dimensional array of features idx : iterable of ints or slices Either an ordered array of boundary indices, or an iterable collection of slice objects. aggregate : function aggregation function (default: `np.mean`) pad : boolean If `True`, ``idx`` is padded to span the full range ``[0, data.shape[axis]]`` axis : int The axis along which to aggregate data Returns ------- data_sync : ndarray ``data_sync`` will have the same dimension as ``data``, except that the ``axis`` coordinate will be reduced according to ``idx``. 
For example, a 2-dimensional ``data`` with ``axis=-1`` should satisfy:: data_sync[:, i] = aggregate(data[:, idx[i-1]:idx[i]], axis=-1) Raises ------ ParameterError If the index set is not of consistent type (all slices or all integers) Notes ----- This function caches at level 40. Examples -------- Beat-synchronous CQT spectra >>> y, sr = librosa.load(librosa.ex('choice')) >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False) >>> C = np.abs(librosa.cqt(y=y, sr=sr)) >>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1]) By default, use mean aggregation >>> C_avg = librosa.util.sync(C, beats) Use median-aggregation instead of mean >>> C_med = librosa.util.sync(C, beats, ... aggregate=np.median) Or sub-beat synchronization >>> sub_beats = librosa.segment.subsegment(C, beats) >>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1]) >>> C_med_sub = librosa.util.sync(C, sub_beats, aggregate=np.median) Plot the results >>> import matplotlib.pyplot as plt >>> beat_t = librosa.frames_to_time(beats, sr=sr) >>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr) >>> fig, ax = plt.subplots(nrows=3, sharex=True, sharey=True) >>> librosa.display.specshow(librosa.amplitude_to_db(C, ... ref=np.max), ... x_axis='time', ax=ax[0]) >>> ax[0].set(title='CQT power, shape={}'.format(C.shape)) >>> ax[0].label_outer() >>> librosa.display.specshow(librosa.amplitude_to_db(C_med, ... ref=np.max), ... x_coords=beat_t, x_axis='time', ax=ax[1]) >>> ax[1].set(title='Beat synchronous CQT power, ' ... 'shape={}'.format(C_med.shape)) >>> ax[1].label_outer() >>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub, ... ref=np.max), ... x_coords=subbeat_t, x_axis='time', ax=ax[2]) >>> ax[2].set(title='Sub-beat synchronous CQT power, ' ... 
'shape={}'.format(C_med_sub.shape)) """ if aggregate is None: aggregate = np.mean shape = list(data.shape) if np.all([isinstance(_, slice) for _ in idx]): slices = idx elif np.all([np.issubdtype(type(_), np.integer) for _ in idx]): slices = index_to_slice(np.asarray(idx), 0, shape[axis], pad=pad) else: raise ParameterError("Invalid index set: {}".format(idx)) agg_shape = list(shape) agg_shape[axis] = len(slices) data_agg = np.empty( agg_shape, order="F" if np.isfortran(data) else "C", dtype=data.dtype ) idx_in = [slice(None)] * data.ndim idx_agg = [slice(None)] * data_agg.ndim for (i, segment) in enumerate(slices): idx_in[axis] = segment idx_agg[axis] = i data_agg[tuple(idx_agg)] = aggregate(data[tuple(idx_in)], axis=axis) return data_agg def softmask(X, X_ref, power=1, split_zeros=False): """Robustly compute a soft-mask operation. ``M = X**power / (X**power + X_ref**power)`` Parameters ---------- X : np.ndarray The (non-negative) input array corresponding to the positive mask elements X_ref : np.ndarray The (non-negative) array of reference or background elements. Must have the same shape as ``X``. power : number > 0 or np.inf If finite, returns the soft mask computed in a numerically stable way If infinite, returns a hard (binary) mask equivalent to ``X > X_ref``. Note: for hard masks, ties are always broken in favor of ``X_ref`` (``mask=0``). split_zeros : bool If `True`, entries where ``X`` and ``X_ref`` are both small (close to 0) will receive mask values of 0.5. Otherwise, the mask is set to 0 for these entries. Returns ------- mask : np.ndarray, shape=X.shape The output mask array Raises ------ ParameterError If ``X`` and ``X_ref`` have different shapes. 
If ``X`` or ``X_ref`` are negative anywhere If ``power <= 0`` Examples -------- >>> X = 2 * np.ones((3, 3)) >>> X_ref = np.vander(np.arange(3.0)) >>> X array([[ 2., 2., 2.], [ 2., 2., 2.], [ 2., 2., 2.]]) >>> X_ref array([[ 0., 0., 1.], [ 1., 1., 1.], [ 4., 2., 1.]]) >>> librosa.util.softmask(X, X_ref, power=1) array([[ 1. , 1. , 0.667], [ 0.667, 0.667, 0.667], [ 0.333, 0.5 , 0.667]]) >>> librosa.util.softmask(X_ref, X, power=1) array([[ 0. , 0. , 0.333], [ 0.333, 0.333, 0.333], [ 0.667, 0.5 , 0.333]]) >>> librosa.util.softmask(X, X_ref, power=2) array([[ 1. , 1. , 0.8], [ 0.8, 0.8, 0.8], [ 0.2, 0.5, 0.8]]) >>> librosa.util.softmask(X, X_ref, power=4) array([[ 1. , 1. , 0.941], [ 0.941, 0.941, 0.941], [ 0.059, 0.5 , 0.941]]) >>> librosa.util.softmask(X, X_ref, power=100) array([[ 1.000e+00, 1.000e+00, 1.000e+00], [ 1.000e+00, 1.000e+00, 1.000e+00], [ 7.889e-31, 5.000e-01, 1.000e+00]]) >>> librosa.util.softmask(X, X_ref, power=np.inf) array([[ True, True, True], [ True, True, True], [False, False, True]], dtype=bool) """ if X.shape != X_ref.shape: raise ParameterError("Shape mismatch: {}!={}".format(X.shape, X_ref.shape)) if np.any(X < 0) or np.any(X_ref < 0): raise ParameterError("X and X_ref must be non-negative") if power <= 0: raise ParameterError("power must be strictly positive") # We're working with ints, cast to float. 
dtype = X.dtype if not np.issubdtype(dtype, np.floating): dtype = np.float32 # Re-scale the input arrays relative to the larger value Z = np.maximum(X, X_ref).astype(dtype) bad_idx = Z < np.finfo(dtype).tiny Z[bad_idx] = 1 # For finite power, compute the softmask if np.isfinite(power): mask = (X / Z) ** power ref_mask = (X_ref / Z) ** power good_idx = ~bad_idx mask[good_idx] /= mask[good_idx] + ref_mask[good_idx] # Wherever energy is below energy in both inputs, split the mask if split_zeros: mask[bad_idx] = 0.5 else: mask[bad_idx] = 0.0 else: # Otherwise, compute the hard mask mask = X > X_ref return mask def tiny(x): """Compute the tiny-value corresponding to an input's data type. This is the smallest "usable" number representable in ``x.dtype`` (e.g., float32). This is primarily useful for determining a threshold for numerical underflow in division or multiplication operations. Parameters ---------- x : number or np.ndarray The array to compute the tiny-value for. All that matters here is ``x.dtype`` Returns ------- tiny_value : float The smallest positive usable number for the type of ``x``. If ``x`` is integer-typed, then the tiny value for ``np.float32`` is returned instead. 
See Also -------- numpy.finfo Examples -------- For a standard double-precision floating point number: >>> librosa.util.tiny(1.0) 2.2250738585072014e-308 Or explicitly as double-precision >>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float64)) 2.2250738585072014e-308 Or complex numbers >>> librosa.util.tiny(1j) 2.2250738585072014e-308 Single-precision floating point: >>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float32)) 1.1754944e-38 Integer >>> librosa.util.tiny(5) 1.1754944e-38 """ # Make sure we have an array view x = np.asarray(x) # Only floating types generate a tiny if np.issubdtype(x.dtype, np.floating) or np.issubdtype( x.dtype, np.complexfloating ): dtype = x.dtype else: dtype = np.float32 return np.finfo(dtype).tiny def fill_off_diagonal(x, radius, value=0): """Sets all cells of a matrix to a given ``value`` if they lie outside a constraint region. In this case, the constraint region is the Sakoe-Chiba band which runs with a fixed ``radius`` along the main diagonal. When ``x.shape[0] != x.shape[1]``, the radius will be expanded so that ``x[-1, -1] = 1`` always. ``x`` will be modified in place. Parameters ---------- x : np.ndarray [shape=(N, M)] Input matrix, will be modified in place. radius : float The band radius (1/2 of the width) will be ``int(radius*min(x.shape))`` value : int ``x[n, m] = value`` when ``(n, m)`` lies outside the band. 
Examples -------- >>> x = np.ones((8, 8)) >>> librosa.util.fill_off_diagonal(x, 0.25) >>> x array([[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]) >>> x = np.ones((8, 12)) >>> librosa.util.fill_off_diagonal(x, 0.25) >>> x array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]]) """ nx, ny = x.shape # Calculate the radius in indices, rather than proportion radius = np.round(radius * np.min(x.shape)) nx, ny = x.shape offset = np.abs((x.shape[0] - x.shape[1])) if nx < ny: idx_u = np.triu_indices_from(x, k=radius + offset) idx_l = np.tril_indices_from(x, k=-radius) else: idx_u = np.triu_indices_from(x, k=radius) idx_l = np.tril_indices_from(x, k=-radius - offset) # modify input matrix x[idx_u] = value x[idx_l] = value def cyclic_gradient(data, edge_order=1, axis=-1): """Estimate the gradient of a function over a uniformly sampled, periodic domain. This is essentially the same as `np.gradient`, except that edge effects are handled by wrapping the observations (i.e. assuming periodicity) rather than extrapolation. Parameters ---------- data : np.ndarray The function values observed at uniformly spaced positions on a periodic domain edge_order: {1, 2} The order of the difference approximation used for estimating the gradient axis : int The axis along which gradients are calculated. Returns ------- grad : np.ndarray like ``data`` The gradient of ``data`` taken along the specified axis. See Also -------- numpy.gradient Examples -------- This example estimates the gradient of cosine (-sine) from 64 samples using direct (aperiodic) and periodic gradient calculation. 
>>> import matplotlib.pyplot as plt >>> x = 2 * np.pi * np.linspace(0, 1, num=64, endpoint=False) >>> y = np.cos(x) >>> grad = np.gradient(y) >>> cyclic_grad = librosa.util.cyclic_gradient(y) >>> true_grad = -np.sin(x) * 2 * np.pi / len(x) >>> fig, ax = plt.subplots() >>> ax.plot(x, true_grad, label='True gradient', linewidth=5, ... alpha=0.35) >>> ax.plot(x, cyclic_grad, label='cyclic_gradient') >>> ax.plot(x, grad, label='np.gradient', linestyle=':') >>> ax.legend() >>> # Zoom into the first part of the sequence >>> ax.set(xlim=[0, np.pi/16], ylim=[-0.025, 0.025]) """ # Wrap-pad the data along the target axis by `edge_order` on each side padding = [(0, 0)] * data.ndim padding[axis] = (edge_order, edge_order) data_pad = np.pad(data, padding, mode="wrap") # Compute the gradient grad = np.gradient(data_pad, edge_order=edge_order, axis=axis) # Remove the padding slices = [slice(None)] * data.ndim slices[axis] = slice(edge_order, -edge_order) return grad[tuple(slices)] @numba.jit(nopython=True, cache=True) def __shear_dense(X, factor=+1, axis=-1): """Numba-accelerated shear for dense (ndarray) arrays""" if axis == 0: X = X.T X_shear = np.empty_like(X) for i in range(X.shape[1]): X_shear[:, i] = np.roll(X[:, i], factor * i) if axis == 0: X_shear = X_shear.T return X_shear def __shear_sparse(X, factor=+1, axis=-1): """Fast shearing for sparse matrices Shearing is performed using CSC array indices, and the result is converted back to whatever sparse format the data was originally provided in. """ fmt = X.format if axis == 0: X = X.T # Now we're definitely rolling on the correct axis X_shear = X.tocsc(copy=True) # The idea here is to repeat the shear amount (factor * range) # by the number of non-zeros for each column. 
# The number of non-zeros is computed by diffing the index pointer array roll = np.repeat(factor * np.arange(X_shear.shape[1]), np.diff(X_shear.indptr)) # In-place roll np.mod(X_shear.indices + roll, X_shear.shape[0], out=X_shear.indices) if axis == 0: X_shear = X_shear.T # And convert back to the input format return X_shear.asformat(fmt) def shear(X, factor=1, axis=-1): """Shear a matrix by a given factor. The column ``X[:, n]`` will be displaced (rolled) by ``factor * n`` This is primarily useful for converting between lag and recurrence representations: shearing with ``factor=-1`` converts the main diagonal to a horizontal. Shearing with ``factor=1`` converts a horizontal to a diagonal. Parameters ---------- X : np.ndarray [ndim=2] or scipy.sparse matrix The array to be sheared factor : integer The shear factor: ``X[:, n] -> np.roll(X[:, n], factor * n)`` axis : integer The axis along which to shear Returns ------- X_shear : same type as ``X`` The sheared matrix Examples -------- >>> E = np.eye(3) >>> librosa.util.shear(E, factor=-1, axis=-1) array([[1., 1., 1.], [0., 0., 0.], [0., 0., 0.]]) >>> librosa.util.shear(E, factor=-1, axis=0) array([[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]]) >>> librosa.util.shear(E, factor=1, axis=-1) array([[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]) """ if not np.issubdtype(type(factor), np.integer): raise ParameterError("factor={} must be integer-valued".format(factor)) if scipy.sparse.isspmatrix(X): return __shear_sparse(X, factor=factor, axis=axis) else: return __shear_dense(X, factor=factor, axis=axis) def stack(arrays, axis=0): """Stack one or more arrays along a target axis. This function is similar to `np.stack`, except that memory contiguity is retained when stacking along the first dimension. This is useful when combining multiple monophonic audio signals into a multi-channel signal, or when stacking multiple feature representations to form a multi-dimensional array. 
Parameters ---------- arrays : list one or more `np.ndarray` axis : integer The target axis along which to stack. ``axis=0`` creates a new first axis, and ``axis=-1`` creates a new last axis. Returns ------- arr_stack : np.ndarray [shape=(len(arrays), array_shape) or shape=(array_shape, len(arrays))] The input arrays, stacked along the target dimension. If ``axis=0``, then ``arr_stack`` will be F-contiguous. Otherwise, ``arr_stack`` will be C-contiguous by default, as computed by `np.stack`. Raises ------ ParameterError - If ``arrays`` do not all have the same shape - If no ``arrays`` are given See Also -------- numpy.stack numpy.ndarray.flags frame Examples -------- Combine two buffers into a contiguous arrays >>> y_left = np.ones(5) >>> y_right = -np.ones(5) >>> y_stereo = librosa.util.stack([y_left, y_right], axis=0) >>> y_stereo array([[ 1., 1., 1., 1., 1.], [-1., -1., -1., -1., -1.]]) >>> y_stereo.flags C_CONTIGUOUS : False F_CONTIGUOUS : True OWNDATA : True WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False UPDATEIFCOPY : False Or along the trailing axis >>> y_stereo = librosa.util.stack([y_left, y_right], axis=-1) >>> y_stereo array([[ 1., -1.], [ 1., -1.], [ 1., -1.], [ 1., -1.], [ 1., -1.]]) >>> y_stereo.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : True WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False UPDATEIFCOPY : False """ shapes = {arr.shape for arr in arrays} if len(shapes) > 1: raise ParameterError("all input arrays must have the same shape") elif len(shapes) < 1: raise ParameterError("at least one input array must be provided for stack") shape_in = shapes.pop() if axis != 0: return np.stack(arrays, axis=axis) else: # If axis is 0, enforce F-ordering shape = tuple([len(arrays)] + list(shape_in)) # Find the common dtype for all inputs dtype = np.find_common_type([arr.dtype for arr in arrays], []) # Allocate an empty array of the right shape and type result = np.empty(shape, dtype=dtype, order="F") # Stack into the preallocated 
buffer np.stack(arrays, axis=axis, out=result) return result def dtype_r2c(d, default=np.complex64): """Find the complex numpy dtype corresponding to a real dtype. This is used to maintain numerical precision and memory footprint when constructing complex arrays from real-valued data (e.g. in a Fourier transform). A `float32` (single-precision) type maps to `complex64`, while a `float64` (double-precision) maps to `complex128`. Parameters ---------- d : np.dtype The real-valued dtype to convert to complex. If ``d`` is a complex type already, it will be returned. default : np.dtype, optional The default complex target type, if ``d`` does not match a known dtype Returns ------- d_c : np.dtype The complex dtype See Also -------- dtype_c2r numpy.dtype Examples -------- >>> librosa.util.dtype_r2c(np.float32) dtype('complex64') >>> librosa.util.dtype_r2c(np.int16) dtype('complex64') >>> librosa.util.dtype_r2c(np.complex128) dtype('complex128') """ mapping = { np.dtype(np.float32): np.complex64, np.dtype(np.float64): np.complex128, np.dtype(np.float): np.complex, } # If we're given a complex type already, return it dt = np.dtype(d) if dt.kind == "c": return dt # Otherwise, try to map the dtype. # If no match is found, return the default. return np.dtype(mapping.get(dt, default)) def dtype_c2r(d, default=np.float32): """Find the real numpy dtype corresponding to a complex dtype. This is used to maintain numerical precision and memory footprint when constructing real arrays from complex-valued data (e.g. in an inverse Fourier transform). A `complex64` (single-precision) type maps to `float32`, while a `complex128` (double-precision) maps to `float64`. Parameters ---------- d : np.dtype The complex-valued dtype to convert to real. If ``d`` is a real (float) type already, it will be returned. 
default : np.dtype, optional The default real target type, if ``d`` does not match a known dtype Returns ------- d_r : np.dtype The real dtype See Also -------- dtype_r2c numpy.dtype Examples -------- >>> librosa.util.dtype_c2r(np.complex64) dtype('float32') >>> librosa.util.dtype_c2r(np.float32) dtype('float32') >>> librosa.util.dtype_c2r(np.int16) dtype('float32') >>> librosa.util.dtype_c2r(np.complex128) dtype('float64') """ mapping = { np.dtype(np.complex64): np.float32, np.dtype(np.complex128): np.float64, np.dtype(np.complex): np.float, } # If we're given a real type already, return it dt = np.dtype(d) if dt.kind == "f": return dt # Otherwise, try to map the dtype. # If no match is found, return the default. return np.dtype(mapping.get(np.dtype(d), default))
isc
ashray/VTK-EVM
ThirdParty/Twisted/twisted/lore/man2lore.py
25
7757
# -*- test-case-name: twisted.lore.test.test_man2lore -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ man2lore: Converts man page source (i.e. groff) into lore-compatible html. This is nasty and hackish (and doesn't support lots of real groff), but is good enough for converting fairly simple man pages. """ import re, os quoteRE = re.compile('"(.*?)"') def escape(text): text = text.replace('<', '&lt;').replace('>', '&gt;') text = quoteRE.sub('<q>\\1</q>', text) return text def stripQuotes(s): if s[0] == s[-1] == '"': s = s[1:-1] return s class ManConverter(object): """ Convert a man page to the Lore format. @ivar tp: State variable for handling text inside a C{TP} token. It can take values from 0 to 3: - 0: when outside of a C{TP} token. - 1: once a C{TP} token has been encountered. If the previous value was 0, a definition list is started. Then, at the first line of text, a definition term is started. - 2: when the first line after the C{TP} token has been handled. The definition term is closed, and a definition is started with the next line of text. - 3: when the first line as definition data has been handled. 
@type tp: C{int} """ state = 'regular' name = None tp = 0 dl = 0 para = 0 def convert(self, inf, outf): self.write = outf.write longline = '' for line in inf.readlines(): if line.rstrip() and line.rstrip()[-1] == '\\': longline += line.rstrip()[:-1] + ' ' continue if longline: line = longline + line longline = '' self.lineReceived(line) self.closeTags() self.write('</body>\n</html>\n') outf.flush() def lineReceived(self, line): if line[0] == '.': f = getattr(self, 'macro_' + line[1:3].rstrip().upper(), None) if f: f(line[3:].strip()) else: self.text(line) def continueReceived(self, cont): if not cont: return if cont[0].isupper(): f = getattr(self, 'macro_' + cont[:2].rstrip().upper(), None) if f: f(cont[2:].strip()) else: self.text(cont) def closeTags(self): if self.state != 'regular': self.write('</%s>' % self.state) if self.tp == 3: self.write('</dd>\n\n') self.tp = 0 if self.dl: self.write('</dl>\n\n') self.dl = 0 if self.para: self.write('</p>\n\n') self.para = 0 def paraCheck(self): if not self.tp and not self.para: self.write('<p>') self.para = 1 def macro_TH(self, line): self.write( '<?xml version="1.0"?>\n' '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\n' ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n') self.write('<html><head>\n') parts = [stripQuotes(x) for x in line.split(' ', 2)] + ['', ''] title, manSection = parts[:2] self.write('<title>%s.%s</title>' % (title, manSection)) self.write('</head>\n<body>\n\n') self.write('<h1>%s.%s</h1>\n\n' % (title, manSection)) macro_DT = macro_TH def macro_SH(self, line): self.closeTags() self.write('<h2>') self.para = 1 self.text(stripQuotes(line)) self.para = 0 self.closeTags() self.write('</h2>\n\n') def macro_B(self, line): words = line.split() words[0] = '\\fB' + words[0] + '\\fR ' self.text(' '.join(words)) def macro_NM(self, line): if not self.name: self.name = line self.text(self.name + ' ') def macro_NS(self, line): parts = line.split(' Ns ') i = 0 for l in parts: i = not i 
if i: self.text(l) else: self.continueReceived(l) def macro_OO(self, line): self.text('[') self.continueReceived(line) def macro_OC(self, line): self.text(']') self.continueReceived(line) def macro_OP(self, line): self.text('[') self.continueReceived(line) self.text(']') def macro_FL(self, line): parts = line.split() self.text('\\fB-%s\\fR' % parts[0]) self.continueReceived(' '.join(parts[1:])) def macro_AR(self, line): parts = line.split() self.text('\\fI %s\\fR' % parts[0]) self.continueReceived(' '.join(parts[1:])) def macro_PP(self, line): self.closeTags() def macro_IC(self, line): cmd = line.split(' ', 1)[0] args = line[line.index(cmd) + len(cmd):] args = args.split(' ') text = cmd while args: arg = args.pop(0) if arg.lower() == "ar": text += " \\fU%s\\fR" % (args.pop(0),) elif arg.lower() == "op": args.pop(0) text += " [\\fU%s\\fR]" % (args.pop(0),) self.text(text) def macro_TP(self, line): """ Handle C{TP} token: start a definition list if it's first token, or close previous definition data. """ if self.tp == 3: self.write('</dd>\n\n') self.tp = 1 else: self.tp = 1 self.write('<dl>') self.dl = 1 def macro_BL(self, line): self.write('<dl>') self.tp = 1 def macro_EL(self, line): if self.tp == 3: self.write('</dd>') self.tp = 1 self.write('</dl>\n\n') self.tp = 0 def macro_IT(self, line): if self.tp == 3: self.write('</dd>') self.tp = 1 self.continueReceived(line) def text(self, line): """ Handle a line of text without detected token. """ if self.tp == 1: self.write('<dt>') if self.tp == 2: self.write('<dd>') self.paraCheck() bits = line.split('\\') self.write(escape(bits[0])) for bit in bits[1:]: if bit[:2] == 'fI': self.write('<em>' + escape(bit[2:])) self.state = 'em' elif bit[:2] == 'fB': self.write('<strong>' + escape(bit[2:])) self.state = 'strong' elif bit[:2] == 'fR': self.write('</%s>' % self.state) self.write(escape(bit[2:])) self.state = 'regular' elif bit[:2] == 'fU': # fU doesn't really exist, but it helps us to manage underlined # text. 
self.write('<u>' + escape(bit[2:])) self.state = 'u' elif bit[:3] == '(co': self.write('&copy;' + escape(bit[3:])) else: self.write(escape(bit)) if self.tp == 1: self.write('</dt>') self.tp = 2 elif self.tp == 2: self.tp = 3 class ProcessingFunctionFactory: def generate_lore(self, d, filenameGenerator=None): ext = d.get('ext', '.html') return lambda file,_: ManConverter().convert(open(file), open(os.path.splitext(file)[0]+ext, 'w')) factory = ProcessingFunctionFactory() if __name__ == '__main__': import sys mc = ManConverter().convert(open(sys.argv[1]), sys.stdout)
bsd-3-clause
gabrielfalcao/lettuce
tests/integration/lib/Django-1.2.5/django/contrib/messages/storage/base.py
399
6134
from django.conf import settings from django.utils.encoding import force_unicode, StrAndUnicode from django.contrib.messages import constants, utils LEVEL_TAGS = utils.get_level_tags() class Message(StrAndUnicode): """ Represents an actual message that can be stored in any of the supported storage classes (typically session- or cookie-based) and rendered in a view or template. """ def __init__(self, level, message, extra_tags=None): self.level = int(level) self.message = message self.extra_tags = extra_tags def _prepare(self): """ Prepares the message for serialization by forcing the ``message`` and ``extra_tags`` to unicode in case they are lazy translations. Known "safe" types (None, int, etc.) are not converted (see Django's ``force_unicode`` implementation for details). """ self.message = force_unicode(self.message, strings_only=True) self.extra_tags = force_unicode(self.extra_tags, strings_only=True) def __eq__(self, other): return isinstance(other, Message) and self.level == other.level and \ self.message == other.message def __unicode__(self): return force_unicode(self.message) def _get_tags(self): label_tag = force_unicode(LEVEL_TAGS.get(self.level, ''), strings_only=True) extra_tags = force_unicode(self.extra_tags, strings_only=True) if extra_tags and label_tag: return u' '.join([extra_tags, label_tag]) elif extra_tags: return extra_tags elif label_tag: return label_tag return '' tags = property(_get_tags) class BaseStorage(object): """ This is the base backend for temporary message storage. This is not a complete class; to be a usable storage backend, it must be subclassed and the two methods ``_get`` and ``_store`` overridden. 
""" def __init__(self, request, *args, **kwargs): self.request = request self._queued_messages = [] self.used = False self.added_new = False super(BaseStorage, self).__init__(*args, **kwargs) def __len__(self): return len(self._loaded_messages) + len(self._queued_messages) def __iter__(self): self.used = True if self._queued_messages: self._loaded_messages.extend(self._queued_messages) self._queued_messages = [] return iter(self._loaded_messages) def __contains__(self, item): return item in self._loaded_messages or item in self._queued_messages @property def _loaded_messages(self): """ Returns a list of loaded messages, retrieving them first if they have not been loaded yet. """ if not hasattr(self, '_loaded_data'): messages, all_retrieved = self._get() self._loaded_data = messages or [] return self._loaded_data def _get(self, *args, **kwargs): """ Retrieves a list of stored messages. Returns a tuple of the messages and a flag indicating whether or not all the messages originally intended to be stored in this storage were, in fact, stored and retrieved; e.g., ``(messages, all_retrieved)``. **This method must be implemented by a subclass.** If it is possible to tell if the backend was not used (as opposed to just containing no messages) then ``None`` should be returned in place of ``messages``. """ raise NotImplementedError() def _store(self, messages, response, *args, **kwargs): """ Stores a list of messages, returning a list of any messages which could not be stored. One type of object must be able to be stored, ``Message``. **This method must be implemented by a subclass.** """ raise NotImplementedError() def _prepare_messages(self, messages): """ Prepares a list of messages for storage. """ for message in messages: message._prepare() def update(self, response): """ Stores all unread messages. If the backend has yet to be iterated, previously stored messages will be stored again. Otherwise, only messages added after the last iteration will be stored. 
""" self._prepare_messages(self._queued_messages) if self.used: return self._store(self._queued_messages, response) elif self.added_new: messages = self._loaded_messages + self._queued_messages return self._store(messages, response) def add(self, level, message, extra_tags=''): """ Queues a message to be stored. The message is only queued if it contained something and its level is not less than the recording level (``self.level``). """ if not message: return # Check that the message level is not less than the recording level. level = int(level) if level < self.level: return # Add the message. self.added_new = True message = Message(level, message, extra_tags=extra_tags) self._queued_messages.append(message) def _get_level(self): """ Returns the minimum recorded level. The default level is the ``MESSAGE_LEVEL`` setting. If this is not found, the ``INFO`` level is used. """ if not hasattr(self, '_level'): self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO) return self._level def _set_level(self, value=None): """ Sets a custom minimum recorded level. If set to ``None``, the default level will be used (see the ``_get_level`` method). """ if value is None and hasattr(self, '_level'): del self._level else: self._level = int(value) level = property(_get_level, _set_level, _set_level)
gpl-3.0
heracek/django-nonrel
django/contrib/localflavor/pl/pl_administrativeunits.py
433
13194
# -*- coding: utf-8 -*- """ Polish administrative units as in http://pl.wikipedia.org/wiki/Podzia%C5%82_administracyjny_Polski """ ADMINISTRATIVE_UNIT_CHOICES = ( ('wroclaw', u'Wrocław'), ('jeleniagora', u'Jelenia Góra'), ('legnica', u'Legnica'), ('boleslawiecki', u'bolesławiecki'), ('dzierzoniowski', u'dzierżoniowski'), ('glogowski', u'głogowski'), ('gorowski', u'górowski'), ('jaworski', u'jaworski'), ('jeleniogorski', u'jeleniogórski'), ('kamiennogorski', u'kamiennogórski'), ('klodzki', u'kłodzki'), ('legnicki', u'legnicki'), ('lubanski', u'lubański'), ('lubinski', u'lubiński'), ('lwowecki', u'lwówecki'), ('milicki', u'milicki'), ('olesnicki', u'oleśnicki'), ('olawski', u'oławski'), ('polkowicki', u'polkowicki'), ('strzelinski', u'strzeliński'), ('sredzki', u'średzki'), ('swidnicki', u'świdnicki'), ('trzebnicki', u'trzebnicki'), ('walbrzyski', u'wałbrzyski'), ('wolowski', u'wołowski'), ('wroclawski', u'wrocławski'), ('zabkowicki', u'ząbkowicki'), ('zgorzelecki', u'zgorzelecki'), ('zlotoryjski', u'złotoryjski'), ('bydgoszcz', u'Bydgoszcz'), ('torun', u'Toruń'), ('wloclawek', u'Włocławek'), ('grudziadz', u'Grudziądz'), ('aleksandrowski', u'aleksandrowski'), ('brodnicki', u'brodnicki'), ('bydgoski', u'bydgoski'), ('chelminski', u'chełmiński'), ('golubsko-dobrzynski', u'golubsko-dobrzyński'), ('grudziadzki', u'grudziądzki'), ('inowroclawski', u'inowrocławski'), ('lipnowski', u'lipnowski'), ('mogilenski', u'mogileński'), ('nakielski', u'nakielski'), ('radziejowski', u'radziejowski'), ('rypinski', u'rypiński'), ('sepolenski', u'sępoleński'), ('swiecki', u'świecki'), ('torunski', u'toruński'), ('tucholski', u'tucholski'), ('wabrzeski', u'wąbrzeski'), ('wloclawski', u'wrocławski'), ('zninski', u'źniński'), ('lublin', u'Lublin'), ('biala-podlaska', u'Biała Podlaska'), ('chelm', u'Chełm'), ('zamosc', u'Zamość'), ('bialski', u'bialski'), ('bilgorajski', u'biłgorajski'), ('chelmski', u'chełmski'), ('hrubieszowski', u'hrubieszowski'), ('janowski', u'janowski'), 
('krasnostawski', u'krasnostawski'), ('krasnicki', u'kraśnicki'), ('lubartowski', u'lubartowski'), ('lubelski', u'lubelski'), ('leczynski', u'łęczyński'), ('lukowski', u'łukowski'), ('opolski', u'opolski'), ('parczewski', u'parczewski'), ('pulawski', u'puławski'), ('radzynski', u'radzyński'), ('rycki', u'rycki'), ('swidnicki', u'świdnicki'), ('tomaszowski', u'tomaszowski'), ('wlodawski', u'włodawski'), ('zamojski', u'zamojski'), ('gorzow-wielkopolski', u'Gorzów Wielkopolski'), ('zielona-gora', u'Zielona Góra'), ('gorzowski', u'gorzowski'), ('krosnienski', u'krośnieński'), ('miedzyrzecki', u'międzyrzecki'), ('nowosolski', u'nowosolski'), ('slubicki', u'słubicki'), ('strzelecko-drezdenecki', u'strzelecko-drezdenecki'), ('sulecinski', u'suleńciński'), ('swiebodzinski', u'świebodziński'), ('wschowski', u'wschowski'), ('zielonogorski', u'zielonogórski'), ('zaganski', u'żagański'), ('zarski', u'żarski'), ('lodz', u'Łódź'), ('piotrkow-trybunalski', u'Piotrków Trybunalski'), ('skierniewice', u'Skierniewice'), ('belchatowski', u'bełchatowski'), ('brzezinski', u'brzeziński'), ('kutnowski', u'kutnowski'), ('laski', u'łaski'), ('leczycki', u'łęczycki'), ('lowicki', u'łowicki'), ('lodzki wschodni', u'łódzki wschodni'), ('opoczynski', u'opoczyński'), ('pabianicki', u'pabianicki'), ('pajeczanski', u'pajęczański'), ('piotrkowski', u'piotrkowski'), ('poddebicki', u'poddębicki'), ('radomszczanski', u'radomszczański'), ('rawski', u'rawski'), ('sieradzki', u'sieradzki'), ('skierniewicki', u'skierniewicki'), ('tomaszowski', u'tomaszowski'), ('wielunski', u'wieluński'), ('wieruszowski', u'wieruszowski'), ('zdunskowolski', u'zduńskowolski'), ('zgierski', u'zgierski'), ('krakow', u'Kraków'), ('tarnow', u'Tarnów'), ('nowy-sacz', u'Nowy Sącz'), ('bochenski', u'bocheński'), ('brzeski', u'brzeski'), ('chrzanowski', u'chrzanowski'), ('dabrowski', u'dąbrowski'), ('gorlicki', u'gorlicki'), ('krakowski', u'krakowski'), ('limanowski', u'limanowski'), ('miechowski', u'miechowski'), ('myslenicki', 
u'myślenicki'), ('nowosadecki', u'nowosądecki'), ('nowotarski', u'nowotarski'), ('olkuski', u'olkuski'), ('oswiecimski', u'oświęcimski'), ('proszowicki', u'proszowicki'), ('suski', u'suski'), ('tarnowski', u'tarnowski'), ('tatrzanski', u'tatrzański'), ('wadowicki', u'wadowicki'), ('wielicki', u'wielicki'), ('warszawa', u'Warszawa'), ('ostroleka', u'Ostrołęka'), ('plock', u'Płock'), ('radom', u'Radom'), ('siedlce', u'Siedlce'), ('bialobrzeski', u'białobrzeski'), ('ciechanowski', u'ciechanowski'), ('garwolinski', u'garwoliński'), ('gostyninski', u'gostyniński'), ('grodziski', u'grodziski'), ('grojecki', u'grójecki'), ('kozienicki', u'kozenicki'), ('legionowski', u'legionowski'), ('lipski', u'lipski'), ('losicki', u'łosicki'), ('makowski', u'makowski'), ('minski', u'miński'), ('mlawski', u'mławski'), ('nowodworski', u'nowodworski'), ('ostrolecki', u'ostrołęcki'), ('ostrowski', u'ostrowski'), ('otwocki', u'otwocki'), ('piaseczynski', u'piaseczyński'), ('plocki', u'płocki'), ('plonski', u'płoński'), ('pruszkowski', u'pruszkowski'), ('przasnyski', u'przasnyski'), ('przysuski', u'przysuski'), ('pultuski', u'pułtuski'), ('radomski', u'radomski'), ('siedlecki', u'siedlecki'), ('sierpecki', u'sierpecki'), ('sochaczewski', u'sochaczewski'), ('sokolowski', u'sokołowski'), ('szydlowiecki', u'szydłowiecki'), ('warszawski-zachodni', u'warszawski zachodni'), ('wegrowski', u'węgrowski'), ('wolominski', u'wołomiński'), ('wyszkowski', u'wyszkowski'), ('zwolenski', u'zwoleński'), ('zurominski', u'żuromiński'), ('zyrardowski', u'żyrardowski'), ('opole', u'Opole'), ('brzeski', u'brzeski'), ('glubczycki', u'głubczyski'), ('kedzierzynsko-kozielski', u'kędzierzyński-kozielski'), ('kluczborski', u'kluczborski'), ('krapkowicki', u'krapkowicki'), ('namyslowski', u'namysłowski'), ('nyski', u'nyski'), ('oleski', u'oleski'), ('opolski', u'opolski'), ('prudnicki', u'prudnicki'), ('strzelecki', u'strzelecki'), ('rzeszow', u'Rzeszów'), ('krosno', u'Krosno'), ('przemysl', u'Przemyśl'), 
('tarnobrzeg', u'Tarnobrzeg'), ('bieszczadzki', u'bieszczadzki'), ('brzozowski', u'brzozowski'), ('debicki', u'dębicki'), ('jaroslawski', u'jarosławski'), ('jasielski', u'jasielski'), ('kolbuszowski', u'kolbuszowski'), ('krosnienski', u'krośnieński'), ('leski', u'leski'), ('lezajski', u'leżajski'), ('lubaczowski', u'lubaczowski'), ('lancucki', u'łańcucki'), ('mielecki', u'mielecki'), ('nizanski', u'niżański'), ('przemyski', u'przemyski'), ('przeworski', u'przeworski'), ('ropczycko-sedziszowski', u'ropczycko-sędziszowski'), ('rzeszowski', u'rzeszowski'), ('sanocki', u'sanocki'), ('stalowowolski', u'stalowowolski'), ('strzyzowski', u'strzyżowski'), ('tarnobrzeski', u'tarnobrzeski'), ('bialystok', u'Białystok'), ('lomza', u'Łomża'), ('suwalki', u'Suwałki'), ('augustowski', u'augustowski'), ('bialostocki', u'białostocki'), ('bielski', u'bielski'), ('grajewski', u'grajewski'), ('hajnowski', u'hajnowski'), ('kolnenski', u'kolneński'), ('łomzynski', u'łomżyński'), ('moniecki', u'moniecki'), ('sejnenski', u'sejneński'), ('siemiatycki', u'siematycki'), ('sokolski', u'sokólski'), ('suwalski', u'suwalski'), ('wysokomazowiecki', u'wysokomazowiecki'), ('zambrowski', u'zambrowski'), ('gdansk', u'Gdańsk'), ('gdynia', u'Gdynia'), ('slupsk', u'Słupsk'), ('sopot', u'Sopot'), ('bytowski', u'bytowski'), ('chojnicki', u'chojnicki'), ('czluchowski', u'człuchowski'), ('kartuski', u'kartuski'), ('koscierski', u'kościerski'), ('kwidzynski', u'kwidzyński'), ('leborski', u'lęborski'), ('malborski', u'malborski'), ('nowodworski', u'nowodworski'), ('gdanski', u'gdański'), ('pucki', u'pucki'), ('slupski', u'słupski'), ('starogardzki', u'starogardzki'), ('sztumski', u'sztumski'), ('tczewski', u'tczewski'), ('wejherowski', u'wejcherowski'), ('katowice', u'Katowice'), ('bielsko-biala', u'Bielsko-Biała'), ('bytom', u'Bytom'), ('chorzow', u'Chorzów'), ('czestochowa', u'Częstochowa'), ('dabrowa-gornicza', u'Dąbrowa Górnicza'), ('gliwice', u'Gliwice'), ('jastrzebie-zdroj', u'Jastrzębie Zdrój'), 
('jaworzno', u'Jaworzno'), ('myslowice', u'Mysłowice'), ('piekary-slaskie', u'Piekary Śląskie'), ('ruda-slaska', u'Ruda Śląska'), ('rybnik', u'Rybnik'), ('siemianowice-slaskie', u'Siemianowice Śląskie'), ('sosnowiec', u'Sosnowiec'), ('swietochlowice', u'Świętochłowice'), ('tychy', u'Tychy'), ('zabrze', u'Zabrze'), ('zory', u'Żory'), ('bedzinski', u'będziński'), ('bielski', u'bielski'), ('bierunsko-ledzinski', u'bieruńsko-lędziński'), ('cieszynski', u'cieszyński'), ('czestochowski', u'częstochowski'), ('gliwicki', u'gliwicki'), ('klobucki', u'kłobucki'), ('lubliniecki', u'lubliniecki'), ('mikolowski', u'mikołowski'), ('myszkowski', u'myszkowski'), ('pszczynski', u'pszczyński'), ('raciborski', u'raciborski'), ('rybnicki', u'rybnicki'), ('tarnogorski', u'tarnogórski'), ('wodzislawski', u'wodzisławski'), ('zawiercianski', u'zawierciański'), ('zywiecki', u'żywiecki'), ('kielce', u'Kielce'), ('buski', u'buski'), ('jedrzejowski', u'jędrzejowski'), ('kazimierski', u'kazimierski'), ('kielecki', u'kielecki'), ('konecki', u'konecki'), ('opatowski', u'opatowski'), ('ostrowiecki', u'ostrowiecki'), ('pinczowski', u'pińczowski'), ('sandomierski', u'sandomierski'), ('skarzyski', u'skarżyski'), ('starachowicki', u'starachowicki'), ('staszowski', u'staszowski'), ('wloszczowski', u'włoszczowski'), ('olsztyn', u'Olsztyn'), ('elblag', u'Elbląg'), ('bartoszycki', u'bartoszycki'), ('braniewski', u'braniewski'), ('dzialdowski', u'działdowski'), ('elblaski', u'elbląski'), ('elcki', u'ełcki'), ('gizycki', u'giżycki'), ('goldapski', u'gołdapski'), ('ilawski', u'iławski'), ('ketrzynski', u'kętrzyński'), ('lidzbarski', u'lidzbarski'), ('mragowski', u'mrągowski'), ('nidzicki', u'nidzicki'), ('nowomiejski', u'nowomiejski'), ('olecki', u'olecki'), ('olsztynski', u'olsztyński'), ('ostrodzki', u'ostródzki'), ('piski', u'piski'), ('szczycienski', u'szczycieński'), ('wegorzewski', u'węgorzewski'), ('poznan', u'Poznań'), ('kalisz', u'Kalisz'), ('konin', u'Konin'), ('leszno', u'Leszno'), ('chodzieski', 
u'chodziejski'), ('czarnkowsko-trzcianecki', u'czarnkowsko-trzcianecki'), ('gnieznienski', u'gnieźnieński'), ('gostynski', u'gostyński'), ('grodziski', u'grodziski'), ('jarocinski', u'jarociński'), ('kaliski', u'kaliski'), ('kepinski', u'kępiński'), ('kolski', u'kolski'), ('koninski', u'koniński'), ('koscianski', u'kościański'), ('krotoszynski', u'krotoszyński'), ('leszczynski', u'leszczyński'), ('miedzychodzki', u'międzychodzki'), ('nowotomyski', u'nowotomyski'), ('obornicki', u'obornicki'), ('ostrowski', u'ostrowski'), ('ostrzeszowski', u'ostrzeszowski'), ('pilski', u'pilski'), ('pleszewski', u'pleszewski'), ('poznanski', u'poznański'), ('rawicki', u'rawicki'), ('slupecki', u'słupecki'), ('szamotulski', u'szamotulski'), ('sredzki', u'średzki'), ('sremski', u'śremski'), ('turecki', u'turecki'), ('wagrowiecki', u'wągrowiecki'), ('wolsztynski', u'wolsztyński'), ('wrzesinski', u'wrzesiński'), ('zlotowski', u'złotowski'), ('bialogardzki', u'białogardzki'), ('choszczenski', u'choszczeński'), ('drawski', u'drawski'), ('goleniowski', u'goleniowski'), ('gryficki', u'gryficki'), ('gryfinski', u'gryfiński'), ('kamienski', u'kamieński'), ('kolobrzeski', u'kołobrzeski'), ('koszalinski', u'koszaliński'), ('lobeski', u'łobeski'), ('mysliborski', u'myśliborski'), ('policki', u'policki'), ('pyrzycki', u'pyrzycki'), ('slawienski', u'sławieński'), ('stargardzki', u'stargardzki'), ('szczecinecki', u'szczecinecki'), ('swidwinski', u'świdwiński'), ('walecki', u'wałecki'), )
bsd-3-clause
blab/stability
augur/src/HI_predictability.py
2
26929
###### # script that explores the predictive power of inferred antigenic change # It tree and mutation models inferred in intervals of 10years for H3N2 # ###### from collections import defaultdict from diagnostic_figures import large_effect_mutations, figheight from itertools import izip from H3N2_process import H3N2_process, virus_config from diagnostic_figures import get_slope import matplotlib.pyplot as plt from matplotlib import cm import numpy as np from scipy.stats import ks_2samp from fitness_tolerance import * import seaborn as sns plt.ion() fig_fontsize=14 fs =fig_fontsize params = { 'lam_HI':1.0, 'lam_avi':2.0, 'lam_pot':0.3, 'prefix':'H3N2_', 'serum_Kc':0.003, } def add_panel_label(ax,label, x_offset=-0.1): '''add one letter labels to the upper left corner of a figure A, B, C etc ''' ax.text(x_offset, 0.95, label, transform=ax.transAxes, fontsize=fig_fontsize*1.5) def select_nodes_in_season(tree, interval): '''mark all nodes in a time interval specified by decimalized years, e.g. 2012.34 ''' for node in tree.leaf_iter(): # mark leafs if node.num_date>=interval[0] and node.num_date<interval[1]: node.alive=True node.n_alive = 1 else: node.alive=False node.n_alive = 0 for node in tree.postorder_internal_node_iter(): # go over all internal nodes: alive iff at least one child alive node.alive = any([n.alive for n in node.child_nodes()]) node.n_alive = np.sum([n.n_alive for n in node.child_nodes()]) def calc_LBI(tree, LBI_tau = 0.0005, attr = 'lb'): ''' traverses the tree in postorder and preorder to calculate the up and downstream tree length exponentially weighted by distance. 
then adds them as LBI tree -- dendropy tree for whose node the LBI is being computed attr -- the attribute name used to store the result ''' min_bl = 0.00005 # traverse the tree in postorder (children first) to calculate msg to parents for node in tree.postorder_node_iter(): node.down_polarizer = 0 node.up_polarizer = 0 for child in node.child_nodes(): node.up_polarizer += child.up_polarizer bl = max(min_bl, node.edge_length)/LBI_tau node.up_polarizer *= np.exp(-bl) if node.alive: node.up_polarizer += LBI_tau*(1-np.exp(-bl)) # traverse the tree in preorder (parents first) to calculate msg to children for node in tree.preorder_internal_node_iter(): for child1 in node.child_nodes(): child1.down_polarizer = node.down_polarizer for child2 in node.child_nodes(): if child1!=child2: child1.down_polarizer += child2.up_polarizer bl = max(min_bl, child1.edge_length)/LBI_tau child1.down_polarizer *= np.exp(-bl) if child1.alive: child1.down_polarizer += LBI_tau*(1-np.exp(-bl)) # go over all nodes and calculate the LBI (can be done in any order) max_LBI = 0 for node in tree.postorder_node_iter(): tmp_LBI = node.down_polarizer for child in node.child_nodes(): tmp_LBI += child.up_polarizer node.__setattr__(attr, tmp_LBI) if tmp_LBI>max_LBI: max_LBI=tmp_LBI return max_LBI ''' mutation model goes over different intervals and fits the HI model ''' mut_models = True save_figs = True if mut_models: resolutions = ['1985to1995','1990to2000','1995to2005','2000to2010','2005to2016'] fig, axs = plt.subplots(1,len(resolutions), sharey=True, figsize=(4*figheight, 1.3*figheight)) cols={} HI_distributions_mutations = [] #### make a plot of trajectories colored by HI effect for res,ax in izip(resolutions,axs): params['pivots_per_year'] = 6.0 params['resolution']=res params['time_interval'] = map(float, res.split('to')) #params['time_interval'] = [2015.8-int(res[:-1]), 2015.8] if params['time_interval'][1]>2015: params['time_interval'][1]=2015.8 # add all arguments to virus_config (possibly 
overriding) virus_config.update(params) # pass all these arguments to the processor: will be passed down as kwargs through all classes myH3N2 = H3N2_process(**virus_config) myH3N2.run(['HI'], lam_HI = virus_config['lam_HI'], lam_avi = virus_config['lam_avi'], lam_pot = virus_config['lam_pot'], ) cols = large_effect_mutations(myH3N2, ax, cols) # plot the mutation trajectories into a multi panel figure for mut in myH3N2.mutation_effects: # for each mutation, make a list of mutation, effect and frequency trajectory HI = myH3N2.mutation_effects[mut] mutlabel = mut[0]+':'+mut[1][1:] if mutlabel in myH3N2.frequencies["mutations"]["global"]: HI_distributions_mutations.append([res, mut, HI, np.array(myH3N2.frequencies["mutations"]["global"][mutlabel])]) else: print("no frequencies for ",mut, 'HI', HI) continue print(len(HI_distributions_mutations)) if save_figs: plt.savefig("prediction_figures/"+"trajectories_mutations.pdf") ### make cumulative distribution of HI titers that fix or don't freq_thres = 0.5 # minimal freq HI_threshold =0.1 # minimal HI effect fixed = np.array([ HI for res, mut, HI, freq in HI_distributions_mutations if freq[0]<0.1 and freq.max()>freq_thres and HI>HI_threshold]) # condition on initially rare failed = np.array([ HI for res, mut, HI, freq in HI_distributions_mutations if freq[0]<0.1 and freq.max()<freq_thres and HI>HI_threshold]) D, p = ks_2samp(fixed, failed) print("HI distribution of fixed and failed, KS stat:", D, "p-val:",p) # plt cumulative distributions plt.figure() plt.plot(sorted(fixed), np.linspace(0,1,len(fixed)), label = '>'+str(freq_thres)+' n='+str(len(fixed))) plt.plot(sorted(failed), np.linspace(0,1,len(failed)), label = '<'+str(freq_thres)+' n='+str(len(failed))) plt.xlabel('HI effect') plt.ylabel('cumulative distribution') plt.legend(loc=4) if save_figs: plt.savefig("prediction_figures/"+"cumulative_HI_mutations.pdf") ################################################################ ##### fraction successful 
################################################################ plt.figure(figsize = (1.6*figheight, figheight)) ax3 = plt.subplot(1,1,1) HI_max = np.array([[HI, freq.max()] for res, mut, HI, freq in HI_distributions_mutations if freq[0]<0.1 and freq.max()>0.1]) nreps=100 HI_threshold = np.array([0.0, 0.3, 0.7, 1.2, 4]) #defining HI categories HI_binc = 0.5*(HI_threshold[:-1] + HI_threshold[1:]) for fi,freq_thres in enumerate([0.25, 0.5, 0.75, 0.95]): frac_success = [] stddev_success = [] for HI_lower, HI_upper in zip(HI_threshold[:-1], HI_threshold[1:]): ind = (HI_max[:,0]>=HI_lower)&(HI_max[:,0]<HI_upper) vals = HI_max[ind,1] tmp = [] for rep in xrange(nreps): tmp_vals = vals[np.random.randint(len(vals), size=len(vals)/2)] tmp.append((tmp_vals>freq_thres).mean()) stddev_success.append(np.std(tmp)) print(HI_lower, ind.sum()) frac_success.append((HI_max[ind,1]>freq_thres).mean()) ax3.errorbar(np.arange(len(frac_success))+0.5+0.03*fi, frac_success,stddev_success, label = "max freq >"+str(freq_thres), lw=2) ax3.set_xlabel('HI effect', fontsize=fs) ax3.set_ylabel('fraction reaching frequency threshold', fontsize=fs) ax3.tick_params(labelsize=fs) ax3.set_xticks(np.arange(len(HI_binc))+0.5) ax3.set_xticklabels([str(lower)+'-'+str(upper) for lower, upper in zip(HI_threshold[:-1], HI_threshold[1:])]) plt.legend(loc=8, fontsize=fs) plt.ylim([0,1]) plt.xlim([0,len(HI_binc)]) plt.tight_layout() if save_figs: plt.savefig("prediction_figures/"+'fraction_successful.pdf') ### make cumulative HI on backbone HI_backbone = np.array([HI for res, mut, HI, freq in HI_distributions_mutations if freq[0]<0.1 and freq.max()>0.75]) HI_backbone.sort() plt.figure(figsize = (1.2*figheight, figheight)) cumHI = HI_backbone.cumsum() plt.plot(HI_backbone, cumHI/cumHI[-1]) plt.ylabel('fraction of cHI due to effects < cutoff', fontsize = fs) plt.xlabel('effect size', fontsize = fs) plt.tick_params(labelsize=fs) plt.tight_layout() if save_figs: 
plt.savefig("prediction_figures/"+'cumulative_HI_effects.pdf') ''' analyze the tree model This uses the 30 year span and investigates whether antigenic advance (as measured by cHI) is predictive of clade success. ''' res = '1985to2016' params['pivots_per_year'] = 3.0 params['resolution']=res params['time_interval'] = map(float, res.split('to')) if params['time_interval'][1]>2015: params['time_interval'][1]=2015.8 # set the upper time limit to past the last sequence # add all arguments to virus_config (possibly overriding) virus_config.update(params) # pass all these arguments to the processor: will be passed down as kwargs through all classes myH3N2 = H3N2_process(**virus_config) myH3N2.load() # assign dates to internal nodes as minimum for children. this is needed to calculate the recent for node in myH3N2.tree.postorder_internal_node_iter(): node.num_date = np.min([c.num_date for c in node.child_nodes()]) assign_fitness(myH3N2.tree) dates_fitness = np.array([(n.num_date, n.tol) for n in myH3N2.tree.postorder_internal_node_iter()]) dates_fitness_term = np.array([(n.num_date, n.tol) for n in myH3N2.tree.leaf_iter()]) pivots = myH3N2.tree.seed_node.pivots HI_vs_max_freq_tree = [] dt=0.5 for node in myH3N2.tree.postorder_internal_node_iter(): if node.num_date<1987: continue if node.freq["global"] is not None and node.freq["global"].max()>0.05: p = node.parent_node cHI = node.dHI while p is not None and (node.num_date - p.num_date)<dt: cHI += p.dHI p = p.parent_node ind = (dates_fitness_term[:,0]<=node.num_date)&(dates_fitness_term[:,0]>node.num_date-dt) HI_vs_max_freq_tree.append((cHI, np.array(node.freq["global"]))) freq_clusters = [] globbing_thres=0.25 print("combining trajectories") for cHI, freq in HI_vs_max_freq_tree: found = False for fi, (cHIs, cfreqs) in enumerate(freq_clusters): max_freq = np.mean(cfreqs, axis=0).max()+0.1 if np.max(np.abs(freq - np.mean(cfreqs, axis=0)))/max_freq<globbing_thres: freq_clusters[fi][1].append(freq) 
freq_clusters[fi][0].append(cHI) found=True if not found: print ("adding new cluster") freq_clusters.append([[cHI], [freq]]) print("generated",len(freq_clusters), " trajectory clusters") freq_thres = 0.75 fixed = np.array([ max(HI) for HI, freqs in freq_clusters if np.max(np.mean(freqs, axis=0))>freq_thres and max(HI)>0.01]) failed = np.array([ max(HI) for HI, freqs in freq_clusters if np.max(np.mean(freqs, axis=0))<freq_thres and max(HI)>0.01]) print("split into lists of failed and successful trajectories") D, p = ks_2samp(fixed, failed) print("KS stat:", D, "p-val:",p) plt.figure() plt.plot(sorted(fixed), np.linspace(0,1,len(fixed)), label = '>'+str(freq_thres)+' n='+str(len(fixed))) plt.plot(sorted(failed), np.linspace(0,1,len(failed)), label = '<'+str(freq_thres)+' n='+str(len(failed))) plt.xlabel('HI effect') plt.ylabel('cumulative distribution') plt.legend(loc=4) if save_figs: plt.savefig("prediction_figures/"+"cumulative_HI_tree.pdf") ################################################################ #### plot tree frequencies ################################################################ fs=14 HI_cutoff=0.1 mycmap = cm.cool plt.figure(figsize=(3*figheight, figheight)) ax1 = plt.subplot(1,1,1) for cHIs, cfreqs in freq_clusters: if max(cHIs)>HI_cutoff: ax1.plot(pivots,np.mean(cfreqs, axis=0), c=mycmap(np.sqrt(np.max(cHIs))/2)) sm = plt.cm.ScalarMappable(cmap=mycmap, norm=plt.Normalize(vmin=0, vmax=2)) # fake up the array of the scalar mappable. Urgh... 
sm._A = [] cb = plt.colorbar(sm) cb.set_ticks(np.sqrt([0, 0.3, 1, 2,4])) cb.set_ticklabels(map(str, [0, 0.3, 1, 2,4])) cb.set_label('HI effect', fontsize=fs) ax1.set_ylabel('frequency', fontsize=fs) ax1.set_xlabel('year', fontsize=fs) ax1.set_xlim([1985,2016]) ax1.tick_params(labelsize=fs) plt.tight_layout() add_panel_label(ax1, "A", x_offset=-0.07) if save_figs: plt.savefig("prediction_figures/"+"trajectories_tree.pdf") ''' the following is obsolete code that was used to plot the fraction of successful clades vs their antigenic effect ''' ################################################################ ##### add fraction successful ################################################################ #plt.figure(figsize=(2.4*figheight, figheight)) #ax2 = plt.subplot2grid((1,2),(0,0)) #plt.title("tree model", fontsize=fs) ##HI_threshold = np.array([0.1, 0.3, 0.8, 1.5, 4]) #HI_threshold = np.array([0.0, 0.3, 0.8, 1.5, 4]) #HI_binc = 0.5*(HI_threshold[:-1]+HI_threshold[1:]) #HI_max = np.array([[np.max(HI), np.max(np.mean(freqs, axis=0))] for HI, freqs in freq_clusters]) #for freq_thres in [0.5, 0.75, 0.95]: # frac_success = [] # for HI_lower, HI_upper in zip(HI_threshold[:-1], HI_threshold[1:]): # ind = (HI_max[:,0]>=HI_lower)&(HI_max[:,0]<HI_upper) # print(HI_lower, ind.sum()) # frac_success.append((HI_max[ind,1]>freq_thres).mean()) # ax2.plot(np.arange(len(frac_success))+0.5, frac_success, 'o-', label = "max freq >"+str(freq_thres)) # #ax2.set_xlabel('HI effect', fontsize=fs) #ax2.set_ylabel('fraction reaching frequency threshold', fontsize=fs) #ax2.tick_params(labelsize=fs) #ax2.set_xticks(np.arange(len(HI_binc))+0.5) #ax2.set_xticklabels([str(lower)+'-'+str(upper) for lower, upper in zip(HI_threshold[:-1], HI_threshold[1:])]) #plt.legend(loc=4, fontsize=fs) #plt.ylim([0,1]) #plt.xlim([0,len(HI_binc)]) # #ax3 = plt.subplot2grid((1,2),(0,1)) #plt.title("mutation model", fontsize=fs) #HI_max = np.array([[HI, freq.max()] for res, mut, HI, freq in 
HI_distributions_mutations if freq[0]<0.1 and freq.max()>0.01]) #for freq_thres in [0.25, 0.5, 0.75, 0.95]: # frac_success = [] # for HI_lower, HI_upper in zip(HI_threshold[:-1], HI_threshold[1:]): # ind = (HI_max[:,0]>=HI_lower)&(HI_max[:,0]<HI_upper) # print(HI_lower, ind.sum()) # frac_success.append((HI_max[ind,1]>freq_thres).mean()) # ax3.plot(np.arange(len(frac_success))+0.5, frac_success, 'o-', label = "max freq >"+str(freq_thres)) # #ax3.set_xlabel('HI effect', fontsize=fs) #ax3.set_ylabel('fraction reaching frequency threshold', fontsize=fs) #ax3.tick_params(labelsize=fs) #ax3.set_xticks(np.arange(len(HI_binc))+0.5) #ax3.set_xticklabels([str(lower)+'-'+str(upper) for lower, upper in zip(HI_threshold[:-1], HI_threshold[1:])]) #plt.legend(loc=4, fontsize=fs) #plt.ylim([0,1]) #plt.xlim([0,len(HI_binc)]) # #plt.tight_layout() #if save_figs: # plt.savefig("prediction_figures/"+'combined_HI_dynamics.pdf') # # ######################################################################### ##### the following implements bona fide prediction by estimating HI models ##### and LBI for viruses sampled up to a time cutoff and examining the next season ######################################################################### gof_by_year = [] alpha = 'ACGT' def allele_freqs(seqs): ''' alignment -> nucleotide frequencies ''' tmp_seqs = np.array([np.fromstring(seq, 'S1') for seq in seqs]) af = np.zeros((4,tmp_seqs.shape[1])) for ni,nuc in enumerate(alpha): af[ni,:] = (tmp_seqs==nuc).mean(axis=0) return af def af_dist(af1, af2): ''' average distance between two population characterized by nucleotide frequencies af1 and af2''' return 1-(af1*af2).sum(axis=0).mean(axis=0) # distance == 1-probability of being the same def seq_dist(seq, af): ''' average distance betweeen a populiation characterized by nucleotide frequencies and a sequence''' ind = np.array([alpha.index(nuc) for nuc in seq]) #calculate the indices in the frequency array that correspond to the sequence state return 
1.0-np.mean(af[ind, np.arange(len(seq))]) #1 - probability of being the same # frequency cutoffs for a clade to be included cutoffs = [0.01, 0.03, 0.05, 0.1] # set up dictionaries to remember the highest scoring clades for sets defined by each of the cutoffs LBI_HI_by_date = {cutoff:[] for cutoff in cutoffs} best_scores = {cutoff:[] for cutoff in cutoffs} best_LBI = {cutoff:[] for cutoff in cutoffs} best_HI = {cutoff:[] for cutoff in cutoffs} best_HI_vs_HI_of_best = {cutoff:[] for cutoff in cutoffs} # loop over all years we want to include for year in range(1990, 2015): print("#############################################") print("### YEAR:",year) print("#############################################") # train the HI model and remember some basic figures about the fit myH3N2.map_HI(training_fraction = 1.0, method = 'nnl1reg', lam_HI=params['lam_HI'], map_to_tree = True, lam_pot = params['lam_pot'], lam_avi = params['lam_avi'], cutoff_date = year+2./12.0, subset_strains = False, force_redo = True) gof_by_year.append((year, myH3N2.fit_error, myH3N2.tree_graph.shape[0])) # take and allele frequency snapshot of future season Sept until June select_nodes_in_season(myH3N2.tree, (year+9.0/12, year+18.0/12)) future_seqs = [node.seq for node in myH3N2.tree.leaf_iter() if node.alive] future_af = allele_freqs(future_seqs) #current season May until Feb (all previously selected nodes will be erased) select_nodes_in_season(myH3N2.tree, (year-7.0/12, year+2.0/12)) af = allele_freqs([node.seq for node in myH3N2.tree.leaf_iter() if node.alive]) avg_dist = af_dist(af, future_af) max_LBI = calc_LBI(myH3N2.tree, LBI_tau = 0.001) total_alive = 1.0*myH3N2.tree.seed_node.n_alive # loop over the different frequency cut-offs for cutoff in cutoffs: # make a list of nodes (clades) that are used for prediction. 
frequency needs to be >cutoff and <0.95 nodes = [node for node in myH3N2.tree.postorder_node_iter() if node.alive and node.n_alive/total_alive>cutoff and node.n_alive/total_alive<0.95] # determine the minimal distance to future, the average cumulative antigenic advance, and the best possible node all_distance_to_future = np.array([seq_dist(node.seq, future_af) for node in nodes]) best = np.argmin(all_distance_to_future) min_dist = all_distance_to_future[best] current_cHI = np.mean([n.cHI for n in nodes]) # determine the nodes with the highest LBI and the highest HI all_LBI = np.array([n.lb for n in nodes]) best_LBI_node = nodes[np.argmax(all_LBI)] best_HI_node = nodes[np.argmax([n.cHI for n in nodes])] # remember the LBI and HI of the best node best_scores[cutoff].append([year, nodes[best].lb/max_LBI, nodes[best].cHI-current_cHI]) # remember the LBI and HI, normalized (d/avg_d), standardized (d-min_d)/(avg_d-min_d), and min_d, avg_d for node with highest LBI best_LBI[cutoff].append((year, best_LBI_node.lb/max_LBI, best_LBI_node.cHI-current_cHI, (seq_dist(best_LBI_node.seq, future_af)-min_dist)/(avg_dist-min_dist), seq_dist(best_LBI_node.seq, future_af)/avg_dist, min_dist, avg_dist)) # remember the LBI and HI, normalized (d/avg_d), standardized (d-min_d)/(avg_d-min_d), and min_d, avg_d for node with highest HI best_HI[cutoff].append((year, best_HI_node.lb/max_LBI, best_HI_node.cHI-current_cHI, (seq_dist(best_HI_node.seq, future_af)-min_dist)/(avg_dist-min_dist), seq_dist(best_HI_node.seq, future_af)/avg_dist, min_dist, avg_dist)) # remember HI of the node closest to the future and the HI of the node iwth the highest HI best_HI_vs_HI_of_best[cutoff].append((year, best_HI_node.cHI - current_cHI, nodes[best].cHI - current_cHI)) print(year, "avg_dist", avg_dist) # make a list of the LBI, cHI, and distances for every node in the set belonging to cutoffs. 
(used for scattering LBI vs HI) for node in nodes: node_freq = node.n_alive/total_alive if node.freq["global"] is not None: tmp_freq = np.array(node.freq["global"]) ii = pivots.searchsorted(year+0.2) nii= pivots.searchsorted(year+1.0) LBI_HI_by_date[cutoff].append((node, year, node.lb/max_LBI, node.cHI-current_cHI, (seq_dist(node.seq, future_af)-min_dist)/(avg_dist-min_dist),node_freq, tmp_freq[ii], tmp_freq[nii], tmp_freq)) else: #print("missing frequency", year, node.n_alive) LBI_HI_by_date[cutoff].append((node, year, node.lb/max_LBI, node.cHI-current_cHI, (seq_dist(node.seq, future_af)-min_dist)/(avg_dist-min_dist), node_freq, 0,0,0)) # make an array out of all values for slicing and plotting clades = {} for cutoff in cutoffs: best_scores[cutoff] = np.array(best_scores[cutoff]) best_LBI[cutoff] = np.array(best_LBI[cutoff]) best_HI[cutoff] = np.array(best_HI[cutoff]) best_HI_vs_HI_of_best[cutoff] = np.array(best_HI_vs_HI_of_best[cutoff]) tmp = [] for cl in LBI_HI_by_date[cutoff]: tmp.append(cl[1:-1]) # exclude node (entry 0) and frequencies (entry -1) since they aren't numbers clades[cutoff] = np.array(tmp) ############ # make figure that shows the distance to future season for all years # comparing LBI and HI at different cutoffs ############ cutoff = 0.01 plt.figure(figsize=(2.4*figheight,figheight)) ax=plt.subplot(121) # -2 == mindist, -1 == avg_dits -3 == dist/avg_dist ax.plot(best_HI[cutoff][:,0],best_HI[cutoff][:,-2]/best_HI[cutoff][:,-1], label='best',lw=2, c='k') ax.plot(best_LBI[cutoff][:,0],best_LBI[cutoff][:,-3], label='LBI',lw=2) ax.plot(best_HI[0.05][:,0],best_HI[0.05][:,-3], label='cHI >0.05',lw=2) ax.plot(best_HI[0.01][:,0],best_HI[0.01][:,-3], label='cHI >0.01',lw=2) ax.plot([1990, 2015], [1.0, 1.0], lw=3, c='k', ls='--') ax.tick_params(labelsize=fs) add_panel_label(ax, "B", x_offset=-0.12) ax.set_xlabel('year', fontsize=fs) ax.set_ylabel('distance to season year/year+1', fontsize=fs) ax.set_yticks([0,0.5, 1.0, 1.5]) plt.legend(loc=2, fontsize=fs) 
############ # second panel showing the distribytion of HI values of the best nodes ############ cols = sns.color_palette(n_colors=5) symbs = ['o','d','v','^','<'] ax = plt.subplot(122) ax.hist(best_scores[0.05][:,-1]) add_panel_label(ax, "C") ax.set_yticks([0,2,4,6, 8]) ax.set_ylim([0,10]) ax.set_ylabel("#years", fontsize=fs) ax.set_xlabel(r'$cHI-\langle cHI\rangle_{year}$', fontsize=fs) ax.tick_params(labelsize=fs) plt.tight_layout() if save_figs: plt.savefig("prediction_figures/"+'LBI_and_HI_vs_distance.pdf') #fig, axs = plt.subplots(1,3, figsize=(3.0*figheight, figheight)) #lbi_cutoff = 0.2 #for ax, qi in izip(axs,[1,2]): # for yi,year in enumerate(range(1990,2015)): # ind = (clades[:,0]==year)&((clades[:,-3]>lbi_cutoff)|(clades[:,2]>.5)) #restrict to clades larger than cutoff # if ind.sum()==0: # continue # lstr = str(year) if (year<1998 and qi==1) or (year>=1998 and year<2006 and qi==2) else None # ax.scatter(clades[ind,qi], clades[ind,3], c=cols[yi%5], marker=symbs[yi//5], s=50, label=lstr) # print(cols[yi%5]) # x_str = r'$cHI-\langle cHI\rangle_{year}$' if qi==2 else r'$LBI/\max(LBI)$' # ax.set_xlabel(x_str, fontsize=fs) # ax.tick_params(labelsize=fs) # ax.set_xlim((0.2,1.4) if qi==1 else (-3,3)) # ax.set_xticks((0.25,0.5, 0.75, 1.0) if qi==1 else (-2,-1,0,1,2)) # ax.set_ylim((-0.2,2.5)) # if qi<3: # ax.set_yticks([0, 0.5,1.0, 1.5, 2.0]) # ax.set_ylabel(r'distance to season year/year+1', fontsize=fs) # ax.legend() # add_panel_label(ax, "C" if qi==2 else "B", x_offset=-0.12) # #ax = axs[2] #for yi, (year, lbi, cHI) in enumerate(best_scores): # lstr = str(year) if (year>=2006) else None # ax.scatter([lbi], [cHI], c=cols[yi%5], marker=symbs[yi//5], s=50, label=lstr) #ax.set_xlabel(r'$LBI/\max(LBI)$', fontsize=fs) #ax.set_ylabel(r'$cHI-\langle cHI\rangle_{year}$', fontsize=fs) #ax.set_xlim([0, 1.1]) #ax.set_xticks([0, 0.25, 0.5, 0.75, 1]) #ax.set_yticks([-0.5, 0, 0.5, 1, 1.5]) #ax.legend() ################################################################# ### 
plot best HI vs HI of best ################################################################ plt.figure(figsize = (1.2*figheight, figheight)) ax=plt.subplot(111) for col, cutoff in zip(['b', 'g'], [0.01, 0.05]): plt.scatter(best_HI_vs_HI_of_best[cutoff][:,1], best_HI_vs_HI_of_best[cutoff][:,2], label = '>'+str(cutoff), s=50, c=col) #, s=50*best_HI[:,-3]) plt.plot([0,3],[0,3]) plt.tick_params(labelsize=fs) plt.xlabel(r'maximal $cHI-\langle cHI\rangle_{year}$', fontsize=fs) plt.ylabel(r'successful $cHI-\langle cHI\rangle_{year}$', fontsize=fs) plt.xticks([0,1,2,3,4]) plt.yticks([-1, 0,1,2,3]) plt.legend(loc=2) plt.tight_layout() if save_figs: plt.savefig("prediction_figures/"+'best_HI_vs_HI_of_best.pdf') ################################################################ ### scatter plot of LBI vs HI ################################################################ plt.figure(figsize=(2.4*figheight, figheight)) mycmap=cm.Set1 cutoff = 0.01 for li,lbi_cutoff in enumerate([0.2, 0.1]): ax = plt.subplot(1,2,li+1) ind = clades[cutoff][:,-3]>lbi_cutoff #restrict to clades larger than cutoff if ind.sum()==0: continue ax.set_title('clades >'+str(lbi_cutoff)+' frequency') ax.scatter(clades[cutoff][ind,1], clades[cutoff][ind,2], c=mycmap((clades[cutoff][ind,0]-1990)/25.0), s=80*(1-clades[cutoff][ind,3])**2) ax.set_ylabel(r'$cHI-\langle cHI\rangle_{year}$', fontsize=fs) ax.set_xlabel(r'$LBI/\max(LBI)$', fontsize=fs) ax.tick_params(labelsize=fs) ax.set_yticks([-3,-2,-1,0,1,2]) add_panel_label(ax, "C" if li else "B", x_offset=-0.15) if li: sm = plt.cm.ScalarMappable(cmap=mycmap, norm=plt.Normalize(vmin=1990, vmax=2015)) sm._A = [] cb = plt.colorbar(sm) cb.set_ticks([1990,1995,2000, 2005, 2010, 2015]) cb.set_label('year', fontsize=fs) plt.tight_layout() if save_figs: plt.savefig("prediction_figures/"+'LBI_HI.pdf')
agpl-3.0
SickGear/SickGear
lib/feedparser_py2/urls.py
3
5628
# Copyright 2010-2020 Kurt McKee <contactme@kurtmckee.org> # Copyright 2002-2008 Mark Pilgrim # All rights reserved. # # This file is a part of feedparser. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import from __future__ import unicode_literals import re try: import urllib.parse as urlparse except ImportError: import urlparse as urlparse from .html import _BaseHTMLProcessor # If you want feedparser to allow all URL schemes, set this to () # List culled from Python's urlparse documentation at: # http://docs.python.org/library/urlparse.html # as well as from "URI scheme" at Wikipedia: # https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme # Many more will likely need to be added! 
ACCEPTABLE_URI_SCHEMES = ( 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet', 'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais', # Additional common-but-unofficial schemes 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', ) _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) try: uri = urlparse.urljoin(base, uri) except ValueError: uri = '' return uri def convert_to_idn(url): """Convert a URL to IDN notation""" # this function should only be called with a unicode string # strategy: if the host cannot be encoded in ascii, then # it'll be necessary to encode it in idn form parts = list(urlparse.urlsplit(url)) try: parts[1].encode('ascii') except UnicodeEncodeError: # the url needs to be converted to idn notation host = parts[1].rsplit(':', 1) newhost = [] port = '' if len(host) == 2: port = host.pop() for h in host[0].split('.'): newhost.append(h.encode('idna').decode('utf-8')) parts[1] = '.'.join(newhost) if port: parts[1] += ':' + port return urlparse.urlunsplit(parts) else: return url def make_safe_absolute_uri(base, rel=None): # bail if ACCEPTABLE_URI_SCHEMES is empty if not ACCEPTABLE_URI_SCHEMES: return _urljoin(base, rel or '') if not base: return rel or '' if not rel: try: scheme = urlparse.urlparse(base)[0] except ValueError: return '' if not scheme or scheme in ACCEPTABLE_URI_SCHEMES: return base return '' uri = _urljoin(base, rel) if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES: return '' return uri class RelativeURIResolver(_BaseHTMLProcessor): relative_uris = { ('a', 'href'), ('applet', 'codebase'), ('area', 'href'), ('audio', 'src'), ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'), ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'), 
('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'), ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'), ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'), ('link', 'href'), ('object', 'classid'), ('object', 'codebase'), ('object', 'data'), ('object', 'usemap'), ('q', 'cite'), ('script', 'src'), ('source', 'src'), ('video', 'poster'), ('video', 'src'), } def __init__(self, baseuri, encoding, _type): _BaseHTMLProcessor.__init__(self, encoding, _type) self.baseuri = baseuri def resolve_uri(self, uri): return make_safe_absolute_uri(self.baseuri, uri.strip()) def unknown_starttag(self, tag, attrs): attrs = self.normalize_attrs(attrs) attrs = [(key, ((tag, key) in self.relative_uris) and self.resolve_uri(value) or value) for key, value in attrs] super(RelativeURIResolver, self).unknown_starttag(tag, attrs) def resolve_relative_uris(html_source, base_uri, encoding, type_): p = RelativeURIResolver(base_uri, encoding, type_) p.feed(html_source) return p.output()
gpl-3.0
braintreeps/moto
moto/kms/responses.py
2
9653
from __future__ import unicode_literals import base64 import json import re from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException from moto.core.responses import BaseResponse from .models import kms_backends reserved_aliases = [ 'alias/aws/ebs', 'alias/aws/s3', 'alias/aws/redshift', 'alias/aws/rds', ] class KmsResponse(BaseResponse): @property def parameters(self): return json.loads(self.body.decode("utf-8")) @property def kms_backend(self): return kms_backends[self.region] def create_key(self): policy = self.parameters.get('Policy') key_usage = self.parameters.get('KeyUsage') description = self.parameters.get('Description') key = self.kms_backend.create_key(policy, key_usage, description, self.region) return json.dumps(key.to_dict()) def describe_key(self): key_id = self.parameters.get('KeyId') try: key = self.kms_backend.describe_key(self.kms_backend.get_key_id(key_id)) except KeyError: headers = dict(self.headers) headers['status'] = 404 return "{}", headers return json.dumps(key.to_dict()) def list_keys(self): keys = self.kms_backend.list_keys() return json.dumps({ "Keys": [ { "KeyArn": key.arn, "KeyId": key.id, } for key in keys ], "NextMarker": None, "Truncated": False, }) def create_alias(self): alias_name = self.parameters['AliasName'] target_key_id = self.parameters['TargetKeyId'] region = self.region if not alias_name.startswith('alias/'): raise JSONResponseError(400, 'Bad Request', body={'message': 'Invalid identifier', '__type': 'ValidationException'}) if alias_name in reserved_aliases: raise JSONResponseError(400, 'Bad Request', body={'__type': 'NotAuthorizedException'}) if ':' in alias_name: raise JSONResponseError(400, 'Bad Request', body={ 'message': '{alias_name} contains invalid characters for an alias'.format(**locals()), '__type': 'ValidationException'}) if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name): raise JSONResponseError(400, 'Bad Request', body={ 'message': "1 validation 
error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$" .format(**locals()), '__type': 'ValidationException'}) if self.kms_backend.alias_exists(target_key_id): raise JSONResponseError(400, 'Bad Request', body={ 'message': 'Aliases must refer to keys. Not aliases', '__type': 'ValidationException'}) if self.kms_backend.alias_exists(alias_name): raise AlreadyExistsException(400, 'Bad Request', body={ 'message': 'An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} already exists' .format(**locals()), '__type': 'AlreadyExistsException'}) self.kms_backend.add_alias(target_key_id, alias_name) return json.dumps(None) def delete_alias(self): alias_name = self.parameters['AliasName'] region = self.region if not alias_name.startswith('alias/'): raise JSONResponseError(400, 'Bad Request', body={'message': 'Invalid identifier', '__type': 'ValidationException'}) if not self.kms_backend.alias_exists(alias_name): raise NotFoundException(400, 'Bad Request', body={ 'message': 'Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(**locals()), '__type': 'NotFoundException'}) self.kms_backend.delete_alias(alias_name) return json.dumps(None) def list_aliases(self): region = self.region response_aliases = [ { 'AliasArn': u'arn:aws:kms:{region}:012345678912:{reserved_alias}'.format(region=region, reserved_alias=reserved_alias), 'AliasName': reserved_alias } for reserved_alias in reserved_aliases ] backend_aliases = self.kms_backend.get_all_aliases() for target_key_id, aliases in backend_aliases.items(): for alias_name in aliases: response_aliases.append({ 'AliasArn': u'arn:aws:kms:{region}:012345678912:{alias_name}'.format(region=region, alias_name=alias_name), 'AliasName': alias_name, 'TargetKeyId': target_key_id, }) return json.dumps({ 'Truncated': False, 'Aliases': response_aliases, }) def enable_key_rotation(self): key_id = self.parameters.get('KeyId') 
_assert_valid_key_id(self.kms_backend.get_key_id(key_id)) try: self.kms_backend.enable_key_rotation(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) def disable_key_rotation(self): key_id = self.parameters.get('KeyId') _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) try: self.kms_backend.disable_key_rotation(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) def get_key_rotation_status(self): key_id = self.parameters.get('KeyId') _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) try: rotation_enabled = self.kms_backend.get_key_rotation_status(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), '__type': 'NotFoundException'}) return json.dumps({'KeyRotationEnabled': rotation_enabled}) def put_key_policy(self): key_id = self.parameters.get('KeyId') policy_name = self.parameters.get('PolicyName') policy = self.parameters.get('Policy') _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) _assert_default_policy(policy_name) try: self.kms_backend.put_key_policy(key_id, policy) except KeyError: raise JSONResponseError(404, 'Not Found', body={ 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), '__type': 'NotFoundException'}) return json.dumps(None) def get_key_policy(self): key_id = self.parameters.get('KeyId') policy_name = self.parameters.get('PolicyName') _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) 
_assert_default_policy(policy_name) try: return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)}) except KeyError: raise JSONResponseError(404, 'Not Found', body={ 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), '__type': 'NotFoundException'}) def list_key_policies(self): key_id = self.parameters.get('KeyId') _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) try: self.kms_backend.describe_key(key_id) except KeyError: raise JSONResponseError(404, 'Not Found', body={ 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region,key_id=key_id), '__type': 'NotFoundException'}) return json.dumps({'Truncated': False, 'PolicyNames': ['default']}) def encrypt(self): """ We perform no encryption, we just encode the value as base64 and then decode it in decrypt(). """ value = self.parameters.get("Plaintext") return json.dumps({"CiphertextBlob": base64.b64encode(value).encode("utf-8")}) def decrypt(self): value = self.parameters.get("CiphertextBlob") return json.dumps({"Plaintext": base64.b64decode(value).encode("utf-8")}) def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): raise JSONResponseError(404, 'Not Found', body={'message': ' Invalid keyId', '__type': 'NotFoundException'}) def _assert_default_policy(policy_name): if policy_name != 'default': raise JSONResponseError(404, 'Not Found', body={ 'message': "No such policy exists", '__type': 'NotFoundException'})
apache-2.0
JackDandy/SickGear
lib/unidecode/x010.py
252
4110
data = ( 'k', # 0x00 'kh', # 0x01 'g', # 0x02 'gh', # 0x03 'ng', # 0x04 'c', # 0x05 'ch', # 0x06 'j', # 0x07 'jh', # 0x08 'ny', # 0x09 'nny', # 0x0a 'tt', # 0x0b 'tth', # 0x0c 'dd', # 0x0d 'ddh', # 0x0e 'nn', # 0x0f 'tt', # 0x10 'th', # 0x11 'd', # 0x12 'dh', # 0x13 'n', # 0x14 'p', # 0x15 'ph', # 0x16 'b', # 0x17 'bh', # 0x18 'm', # 0x19 'y', # 0x1a 'r', # 0x1b 'l', # 0x1c 'w', # 0x1d 's', # 0x1e 'h', # 0x1f 'll', # 0x20 'a', # 0x21 '[?]', # 0x22 'i', # 0x23 'ii', # 0x24 'u', # 0x25 'uu', # 0x26 'e', # 0x27 '[?]', # 0x28 'o', # 0x29 'au', # 0x2a '[?]', # 0x2b 'aa', # 0x2c 'i', # 0x2d 'ii', # 0x2e 'u', # 0x2f 'uu', # 0x30 'e', # 0x31 'ai', # 0x32 '[?]', # 0x33 '[?]', # 0x34 '[?]', # 0x35 'N', # 0x36 '\'', # 0x37 ':', # 0x38 '', # 0x39 '[?]', # 0x3a '[?]', # 0x3b '[?]', # 0x3c '[?]', # 0x3d '[?]', # 0x3e '[?]', # 0x3f '0', # 0x40 '1', # 0x41 '2', # 0x42 '3', # 0x43 '4', # 0x44 '5', # 0x45 '6', # 0x46 '7', # 0x47 '8', # 0x48 '9', # 0x49 ' / ', # 0x4a ' // ', # 0x4b 'n*', # 0x4c 'r*', # 0x4d 'l*', # 0x4e 'e*', # 0x4f 'sh', # 0x50 'ss', # 0x51 'R', # 0x52 'RR', # 0x53 'L', # 0x54 'LL', # 0x55 'R', # 0x56 'RR', # 0x57 'L', # 0x58 'LL', # 0x59 '[?]', # 0x5a '[?]', # 0x5b '[?]', # 0x5c '[?]', # 0x5d '[?]', # 0x5e '[?]', # 0x5f '[?]', # 0x60 '[?]', # 0x61 '[?]', # 0x62 '[?]', # 0x63 '[?]', # 0x64 '[?]', # 0x65 '[?]', # 0x66 '[?]', # 0x67 '[?]', # 0x68 '[?]', # 0x69 '[?]', # 0x6a '[?]', # 0x6b '[?]', # 0x6c '[?]', # 0x6d '[?]', # 0x6e '[?]', # 0x6f '[?]', # 0x70 '[?]', # 0x71 '[?]', # 0x72 '[?]', # 0x73 '[?]', # 0x74 '[?]', # 0x75 '[?]', # 0x76 '[?]', # 0x77 '[?]', # 0x78 '[?]', # 0x79 '[?]', # 0x7a '[?]', # 0x7b '[?]', # 0x7c '[?]', # 0x7d '[?]', # 0x7e '[?]', # 0x7f '[?]', # 0x80 '[?]', # 0x81 '[?]', # 0x82 '[?]', # 0x83 '[?]', # 0x84 '[?]', # 0x85 '[?]', # 0x86 '[?]', # 0x87 '[?]', # 0x88 '[?]', # 0x89 '[?]', # 0x8a '[?]', # 0x8b '[?]', # 0x8c '[?]', # 0x8d '[?]', # 0x8e '[?]', # 0x8f '[?]', # 0x90 '[?]', # 0x91 '[?]', # 0x92 '[?]', # 0x93 '[?]', # 0x94 '[?]', # 0x95 
'[?]', # 0x96 '[?]', # 0x97 '[?]', # 0x98 '[?]', # 0x99 '[?]', # 0x9a '[?]', # 0x9b '[?]', # 0x9c '[?]', # 0x9d '[?]', # 0x9e '[?]', # 0x9f 'A', # 0xa0 'B', # 0xa1 'G', # 0xa2 'D', # 0xa3 'E', # 0xa4 'V', # 0xa5 'Z', # 0xa6 'T`', # 0xa7 'I', # 0xa8 'K', # 0xa9 'L', # 0xaa 'M', # 0xab 'N', # 0xac 'O', # 0xad 'P', # 0xae 'Zh', # 0xaf 'R', # 0xb0 'S', # 0xb1 'T', # 0xb2 'U', # 0xb3 'P`', # 0xb4 'K`', # 0xb5 'G\'', # 0xb6 'Q', # 0xb7 'Sh', # 0xb8 'Ch`', # 0xb9 'C`', # 0xba 'Z\'', # 0xbb 'C', # 0xbc 'Ch', # 0xbd 'X', # 0xbe 'J', # 0xbf 'H', # 0xc0 'E', # 0xc1 'Y', # 0xc2 'W', # 0xc3 'Xh', # 0xc4 'OE', # 0xc5 '[?]', # 0xc6 '[?]', # 0xc7 '[?]', # 0xc8 '[?]', # 0xc9 '[?]', # 0xca '[?]', # 0xcb '[?]', # 0xcc '[?]', # 0xcd '[?]', # 0xce '[?]', # 0xcf 'a', # 0xd0 'b', # 0xd1 'g', # 0xd2 'd', # 0xd3 'e', # 0xd4 'v', # 0xd5 'z', # 0xd6 't`', # 0xd7 'i', # 0xd8 'k', # 0xd9 'l', # 0xda 'm', # 0xdb 'n', # 0xdc 'o', # 0xdd 'p', # 0xde 'zh', # 0xdf 'r', # 0xe0 's', # 0xe1 't', # 0xe2 'u', # 0xe3 'p`', # 0xe4 'k`', # 0xe5 'g\'', # 0xe6 'q', # 0xe7 'sh', # 0xe8 'ch`', # 0xe9 'c`', # 0xea 'z\'', # 0xeb 'c', # 0xec 'ch', # 0xed 'x', # 0xee 'j', # 0xef 'h', # 0xf0 'e', # 0xf1 'y', # 0xf2 'w', # 0xf3 'xh', # 0xf4 'oe', # 0xf5 'f', # 0xf6 '[?]', # 0xf7 '[?]', # 0xf8 '[?]', # 0xf9 '[?]', # 0xfa ' // ', # 0xfb '[?]', # 0xfc '[?]', # 0xfd '[?]', # 0xfe )
gpl-3.0
atyoung/synergy-osx
ext/gtest-1.6.0/xcode/Scripts/versiongenerate.py
3088
4536
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """A script to prepare version informtion for use the gtest Info.plist file. This script extracts the version information from the configure.ac file and uses it to generate a header file containing the same information. The #defines in this header file will be included in during the generation of the Info.plist of the framework, giving the correct value to the version shown in the Finder. 
This script makes the following assumptions (these are faults of the script, not problems with the Autoconf): 1. The AC_INIT macro will be contained within the first 1024 characters of configure.ac 2. The version string will be 3 integers separated by periods and will be surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first segment represents the major version, the second represents the minor version and the third represents the fix version. 3. No ")" character exists between the opening "(" and closing ")" of AC_INIT, including in comments and character strings. """ import sys import re # Read the command line argument (the output directory for Version.h) if (len(sys.argv) < 3): print "Usage: versiongenerate.py input_dir output_dir" sys.exit(1) else: input_dir = sys.argv[1] output_dir = sys.argv[2] # Read the first 1024 characters of the configure.ac file config_file = open("%s/configure.ac" % input_dir, 'r') buffer_size = 1024 opening_string = config_file.read(buffer_size) config_file.close() # Extract the version string from the AC_INIT macro # The following init_expression means: # Extract three integers separated by periods and surrounded by squre # brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy # (*? is the non-greedy flag) since that would pull in everything between # the first "(" and the last ")" in the file. version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)", re.DOTALL) version_values = version_expression.search(opening_string) major_version = version_values.group(1) minor_version = version_values.group(2) fix_version = version_values.group(3) # Write the version information to a header file to be included in the # Info.plist file. file_data = """// // DO NOT MODIFY THIS FILE (but you can delete it) // // This file is autogenerated by the versiongenerate.py script. This script // is executed in a "Run Script" build phase when creating gtest.framework. 
This // header file is not used during compilation of C-source. Rather, it simply // defines some version strings for substitution in the Info.plist. Because of // this, we are not not restricted to C-syntax nor are we using include guards. // #define GTEST_VERSIONINFO_SHORT %s.%s #define GTEST_VERSIONINFO_LONG %s.%s.%s """ % (major_version, minor_version, major_version, minor_version, fix_version) version_file = open("%s/Version.h" % output_dir, 'w') version_file.write(file_data) version_file.close()
gpl-2.0
jamesbdunlop/Nebula
nebula_ui/nMayaui_core/customFileViewer.py
2
5843
import logging, os, sys from PySide2.QtCore import * from PySide2.QtWidgets import * from PySide2.QtCore import Signal from PySide2.QtCore import Slot from functools import partial try: import maya.cmds as cmds except ImportError: pass import nebula_libs.fileIO as fh logging.basicConfig() logger = logging.getLogger(__name__) class MayaTreeView(QTreeView): def __init__(self, parent=None, rootpath=None): """ The base class for treeViewer setup for maya scene files and objects and bobjects. """ QTreeView.__init__(self, parent) self.folder = rootpath self.myParent = parent self.setAnimated(False) self.setDragEnabled(False) self.setAcceptDrops(False) self.setAlternatingRowColors(True) self.setDropIndicatorShown(True) self.setExpandsOnDoubleClick(True) self.setSelectionMode(QAbstractItemView.SingleSelection) self.setSortingEnabled(True) self.sortByColumn(3, Qt.DescendingOrder)## Date time modified self.setColumnWidth(0, 200) self.setContextMenuPolicy(Qt.CustomContextMenu) ## Now the model self.model = QFileSystemModel(self) self.setModel(self.model) self.rootIndex = self.model.setRootPath('%s' % self.folder) self.setRootIndex(self.rootIndex) self.fileFilters = ['*.ma', '*.mb', '*.obj', '*.OBJ', '*.bob', '*.abc'] self.model.setNameFilterDisables(False) self.model.setNameFilters(self.fileFilters) self.model.setFilter(QDir.AllDirs | QDir.NoSymLinks | QDir.NoDotAndDotDot | QDir.Files) self.model.setReadOnly(True) def changeRootPath(self, newPath): logger.info('Changed view to {}'.format(newPath)) self.model.setRootPath('%s' % newPath) rootIndex = self.model.setRootPath('{}'.format(newPath)) self.setRootIndex(rootIndex) class FileViewer(QWidget): fileOpened = Signal(bool) def __init__(self, parent=None, projectPath=None): QWidget.__init__(self, parent) try: self.projectPath = projectPath or '{}/scenes'.format(cmds.workspace(q=True, rootDirectory=True)) except NameError: self.projectPath = "/" self.mainLayout = QVBoxLayout(self) self.pathlabel = QLabel('cur: 
{}'.format(self.projectPath)) self.folderView = MayaTreeView(self, rootpath = self.projectPath) self.folderView.setColumnWidth(0, 100) self.folderView.setColumnWidth(1, 50) self.folderView.setColumnWidth(2, 50) self.folderView.doubleClicked.connect(partial(self._doubleClick)) self.folderView.doubleClicked.connect(self.resizeColumn) self.folderView.clicked.connect(self.resizeColumn) self.folderView.expanded.connect(self.resizeColumn) self.refreshButton = QPushButton("refresh view") self.refreshButton.clicked.connect(self._refreshView) self.mainLayout.addWidget(self.pathlabel) self.mainLayout.addWidget(self.folderView) self.mainLayout.addWidget(self.refreshButton) self.updateProjectPath() def _doubleClick(self, sender): if not self._viewerSelectionInfo()['isDir']: file = fh.mayaFileHandler(self._viewerSelectionInfo()['filePath']) self.fileOpened.emit(True) def _refreshView(self): self.folderView.changeRootPath("/") self._setProjRoot() self.folderView.changeRootPath(self.projectPath) self._setProjRoot() @Slot(str) def updateProjectPath(self, projectPath=None): """Used to update the project paths in the base widget""" if not str(projectPath).endswith("{}scenes".format(os.path.sep)): projectPath = "{}/scenes".format(projectPath) if not os.path.isdir(projectPath): try: self.projectPath = cmds.workspace(query=True, act=True) except: self.projectPath = "/" else: self.projectPath = projectPath self._setProjRoot() def _setProjRoot(self): logger.info('self.projectPath: {}'.format(self.projectPath)) try: # for standalone use curProjPath = cmds.workspace(query=True, act=True) except: curProjPath = "C:/" if self.projectPath != curProjPath: self.projectPath = curProjPath self.folderView.changeRootPath(self.projectPath) self.pathlabel.setText("cur: {}".format(self.projectPath)) def resizeColumn(self): self.folderView.resizeColumnToContents(1) def repaint(self): QWidget.repaint(self) self.resizeColumn() def changeRootPath(self, newPath): self.folderView.changeRootPath(newPath) 
@property def curPath(self): return self.projectPath @property def curItem(self): return self.folderView.currentItem() @property def curModelIndex(self): return QModelIndex(self.folderView.currentIndex()) def _viewerSelectionInfo(self, modelIndex=None): """ Fetch the current file path and it's parent directory from the model.fileInfo as a dictionary for use :return: """ if not modelIndex: modelIndex = self.curModelIndex self.fileInfo = self.folderView.model.fileInfo(modelIndex) self.fileDir = self.fileInfo.absoluteDir() self.filePath = self.fileInfo.absoluteFilePath() self.isDir = self.folderView.model.isDir(modelIndex) data = {} data['dir'] = str(self.fileDir.absolutePath()) data['filePath'] = str(self.filePath) data['isDir'] = self.isDir return data if __name__ == '__main__': qtapp = QApplication(sys.argv) myUI = FileViewer() myUI.show() qtapp.exec_()
apache-2.0
Arch-vile/hwo2014-team-129
python/main.py
4
2289
import json import socket import sys class NoobBot(object): def __init__(self, socket, name, key): self.socket = socket self.name = name self.key = key def msg(self, msg_type, data): self.send(json.dumps({"msgType": msg_type, "data": data})) def send(self, msg): self.socket.send(msg + "\n") def join(self): return self.msg("join", {"name": self.name, "key": self.key}) def throttle(self, throttle): self.msg("throttle", throttle) def ping(self): self.msg("ping", {}) def run(self): self.join() self.msg_loop() def on_join(self, data): print("Joined") self.ping() def on_game_start(self, data): print("Race started") self.ping() def on_car_positions(self, data): self.throttle(0.5) def on_crash(self, data): print("Someone crashed") self.ping() def on_game_end(self, data): print("Race ended") self.ping() def on_error(self, data): print("Error: {0}".format(data)) self.ping() def msg_loop(self): msg_map = { 'join': self.on_join, 'gameStart': self.on_game_start, 'carPositions': self.on_car_positions, 'crash': self.on_crash, 'gameEnd': self.on_game_end, 'error': self.on_error, } socket_file = s.makefile() line = socket_file.readline() while line: msg = json.loads(line) msg_type, data = msg['msgType'], msg['data'] if msg_type in msg_map: msg_map[msg_type](data) else: print("Got {0}".format(msg_type)) self.ping() line = socket_file.readline() if __name__ == "__main__": if len(sys.argv) != 5: print("Usage: ./run host port botname botkey") else: host, port, name, key = sys.argv[1:5] print("Connecting with parameters:") print("host={0}, port={1}, bot name={2}, key={3}".format(*sys.argv[1:5])) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, int(port))) bot = NoobBot(s, name, key) bot.run()
apache-2.0
mzbenami/netmiko
tests/old_format/test_fixture.py
14
5620
#!/usr/bin/env python """ This module runs tests against Cisco IOS devices. setup_module: setup variables for later use. test_disable_paging: disable paging test_ssh_connect: verify ssh connectivity test_send_command: send a command test_send_command_expect: send a command test_base_prompt: test the base prompt test_strip_prompt: test removing the prompt test_strip_command: test stripping extraneous info after sending a command test_normalize_linefeeds: ensure \n is the only line termination character in output test_clear_buffer: clear text buffer test_enable_mode: verify enter enable mode test_config_mode: verify enter config mode test_exit_config_mode: verify exit config mode test_command_set: verify sending a set of config commands test_commands_from_file: verify sending a set of config commands from a file test_exit_enable_mode: verify exit enable mode test_disconnect: cleanly disconnect the SSH session """ from os import path import time import pytest from netmiko import ConnectHandler from test_utils import parse_yaml def test_disable_paging(net_connect, commands, expected_responses): ''' Verify paging is disabled by looking for string after when paging would normally occur ''' multiple_line_output = net_connect.send_command(commands["extended_output"]) assert expected_responses["multiple_line_output"] in multiple_line_output # #def test_ssh_connect(): # ''' # Verify the connection was established successfully # ''' # show_version = net_connect.send_command(commands["version"]) # assert EXPECTED_RESPONSES["version_banner"] in show_version # # #def test_send_command(): # ''' # Verify a command can be sent down the channel successfully # ''' # show_ip = net_connect.send_command(commands["basic"]) # assert EXPECTED_RESPONSES['interface_ip'] in show_ip # # #def test_send_command_expect(): # ''' # Verify a command can be sent down the channel successfully using _expect method # ''' # show_ip_alt = net_connect.send_command_expect(commands["basic"]) # assert 
EXPECTED_RESPONSES['interface_ip'] in show_ip_alt # # #def test_base_prompt(): # ''' # Verify the router prompt is detected correctly # ''' # assert net_connect.base_prompt == EXPECTED_RESPONSES['base_prompt'] # # #def test_strip_prompt(): # ''' # Ensure the router prompt is not in the command output # ''' # show_ip = net_connect.send_command(commands["basic"]) # show_ip_alt = net_connect.send_command_expect(commands["basic"]) # assert EXPECTED_RESPONSES['base_prompt'] not in show_ip # assert EXPECTED_RESPONSES['base_prompt'] not in show_ip_alt # # #def test_strip_command(): # ''' # Ensure that the command that was executed does not show up in the command output # ''' # show_ip = net_connect.send_command(commands["basic"]) # show_ip_alt = net_connect.send_command_expect(commands["basic"]) # assert commands['basic'] not in show_ip # assert commands['basic'] not in show_ip_alt # # #def test_normalize_linefeeds(): # ''' # Ensure no '\r\n' sequences # ''' # show_version = net_connect.send_command(commands["version"]) # show_version_alt = net_connect.send_command_expect(commands["version"]) # assert not '\r\n' in show_version # assert not '\r\n' in show_version_alt # # #def test_clear_buffer(): # ''' # Test that clearing the buffer works # ''' # # Manually send a command down the channel so that data needs read. 
# net_connect.remote_conn.sendall(commands["basic"] + '\n') # time.sleep(2) # net_connect.clear_buffer() # # # Should not be anything there on the second pass # clear_buffer_check = net_connect.clear_buffer() # assert clear_buffer_check is None # # #def test_enable_mode(): # ''' # Test entering enable mode # ''' # router_prompt = net_connect.find_prompt() # assert router_prompt == EXPECTED_RESPONSES['router_prompt'] # net_connect.enable() # enable_prompt = net_connect.find_prompt() # assert enable_prompt == EXPECTED_RESPONSES['enable_prompt'] # # #def test_config_mode(): # ''' # Test enter config mode # ''' # net_connect.config_mode() # assert EXPECTED_RESPONSES['config_mode'] in net_connect.find_prompt() # # #def test_exit_config_mode(): # ''' # Test exit config mode # ''' # net_connect.exit_config_mode() # assert EXPECTED_RESPONSES['config_mode'] not in net_connect.find_prompt() # # #def test_command_set(): # ''' # Test sending configuration commands # ''' # config_commands = commands['config'] # net_connect.send_config_set(config_commands[0:1]) # config_commands_output = net_connect.send_command('show run | inc logging buffer') # assert config_commands[0] in config_commands_output # net_connect.send_config_set(config_commands) # config_commands_output = net_connect.send_command('show run | inc logging buffer') # assert config_commands[-1] in config_commands_output # # #def test_commands_from_file(): # ''' # Test sending configuration commands from a file # ''' # net_connect.send_config_from_file(commands['config_file']) # config_commands_output = net_connect.send_command('show run | inc logging buffer') # assert EXPECTED_RESPONSES["file_check_cmd"] in config_commands_output # # #def test_exit_enable_mode(): # ''' # Test exit enable mode # ''' # exit_enable_mode = net_connect.exit_enable_mode() # assert EXPECTED_RESPONSES["router_prompt"] in exit_enable_mode # # #def test_disconnect(): # ''' # Terminate the SSH session # ''' # net_connect.disconnect() #
mit
klonage/nlt-gcs
packages/IronPython.StdLib.2.7.4/content/Lib/audiodev.py
61
7857
"""Classes for manipulating audio devices (currently only for Sun and SGI)""" from warnings import warnpy3k warnpy3k("the audiodev module has been removed in Python 3.0", stacklevel=2) del warnpy3k __all__ = ["error","AudioDev"] class error(Exception): pass class Play_Audio_sgi: # Private instance variables ## if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \ ## params, config, inited_outrate, inited_width, \ ## inited_nchannels, port, converter, classinited: private classinited = 0 frameratelist = nchannelslist = sampwidthlist = None def initclass(self): import AL self.frameratelist = [ (48000, AL.RATE_48000), (44100, AL.RATE_44100), (32000, AL.RATE_32000), (22050, AL.RATE_22050), (16000, AL.RATE_16000), (11025, AL.RATE_11025), ( 8000, AL.RATE_8000), ] self.nchannelslist = [ (1, AL.MONO), (2, AL.STEREO), (4, AL.QUADRO), ] self.sampwidthlist = [ (1, AL.SAMPLE_8), (2, AL.SAMPLE_16), (3, AL.SAMPLE_24), ] self.classinited = 1 def __init__(self): import al, AL if not self.classinited: self.initclass() self.oldparams = [] self.params = [AL.OUTPUT_RATE, 0] self.config = al.newconfig() self.inited_outrate = 0 self.inited_width = 0 self.inited_nchannels = 0 self.converter = None self.port = None return def __del__(self): if self.port: self.stop() if self.oldparams: import al, AL al.setparams(AL.DEFAULT_DEVICE, self.oldparams) self.oldparams = [] def wait(self): if not self.port: return import time while self.port.getfilled() > 0: time.sleep(0.1) self.stop() def stop(self): if self.port: self.port.closeport() self.port = None if self.oldparams: import al, AL al.setparams(AL.DEFAULT_DEVICE, self.oldparams) self.oldparams = [] def setoutrate(self, rate): for (raw, cooked) in self.frameratelist: if rate == raw: self.params[1] = cooked self.inited_outrate = 1 break else: raise error, 'bad output rate' def setsampwidth(self, width): for (raw, cooked) in self.sampwidthlist: if width == raw: self.config.setwidth(cooked) self.inited_width = 1 break else: if 
width == 0: import AL self.inited_width = 0 self.config.setwidth(AL.SAMPLE_16) self.converter = self.ulaw2lin else: raise error, 'bad sample width' def setnchannels(self, nchannels): for (raw, cooked) in self.nchannelslist: if nchannels == raw: self.config.setchannels(cooked) self.inited_nchannels = 1 break else: raise error, 'bad # of channels' def writeframes(self, data): if not (self.inited_outrate and self.inited_nchannels): raise error, 'params not specified' if not self.port: import al, AL self.port = al.openport('Python', 'w', self.config) self.oldparams = self.params[:] al.getparams(AL.DEFAULT_DEVICE, self.oldparams) al.setparams(AL.DEFAULT_DEVICE, self.params) if self.converter: data = self.converter(data) self.port.writesamps(data) def getfilled(self): if self.port: return self.port.getfilled() else: return 0 def getfillable(self): if self.port: return self.port.getfillable() else: return self.config.getqueuesize() # private methods ## if 0: access *: private def ulaw2lin(self, data): import audioop return audioop.ulaw2lin(data, 2) class Play_Audio_sun: ## if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \ ## inited_nchannels, converter: private def __init__(self): self.outrate = 0 self.sampwidth = 0 self.nchannels = 0 self.inited_outrate = 0 self.inited_width = 0 self.inited_nchannels = 0 self.converter = None self.port = None return def __del__(self): self.stop() def setoutrate(self, rate): self.outrate = rate self.inited_outrate = 1 def setsampwidth(self, width): self.sampwidth = width self.inited_width = 1 def setnchannels(self, nchannels): self.nchannels = nchannels self.inited_nchannels = 1 def writeframes(self, data): if not (self.inited_outrate and self.inited_width and self.inited_nchannels): raise error, 'params not specified' if not self.port: import sunaudiodev, SUNAUDIODEV self.port = sunaudiodev.open('w') info = self.port.getinfo() info.o_sample_rate = self.outrate info.o_channels = self.nchannels if self.sampwidth 
== 0: info.o_precision = 8 self.o_encoding = SUNAUDIODEV.ENCODING_ULAW # XXX Hack, hack -- leave defaults else: info.o_precision = 8 * self.sampwidth info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR self.port.setinfo(info) if self.converter: data = self.converter(data) self.port.write(data) def wait(self): if not self.port: return self.port.drain() self.stop() def stop(self): if self.port: self.port.flush() self.port.close() self.port = None def getfilled(self): if self.port: return self.port.obufcount() else: return 0 ## # Nobody remembers what this method does, and it's broken. :-( ## def getfillable(self): ## return BUFFERSIZE - self.getfilled() def AudioDev(): # Dynamically try to import and use a platform specific module. try: import al except ImportError: try: import sunaudiodev return Play_Audio_sun() except ImportError: try: import Audio_mac except ImportError: raise error, 'no audio device' else: return Audio_mac.Play_Audio_mac() else: return Play_Audio_sgi() def test(fn = None): import sys if sys.argv[1:]: fn = sys.argv[1] else: fn = 'f:just samples:just.aif' import aifc af = aifc.open(fn, 'r') print fn, af.getparams() p = AudioDev() p.setoutrate(af.getframerate()) p.setsampwidth(af.getsampwidth()) p.setnchannels(af.getnchannels()) BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels() while 1: data = af.readframes(BUFSIZ) if not data: break print len(data) p.writeframes(data) p.wait() if __name__ == '__main__': test()
gpl-3.0
geminy/aidear
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/decoder.py
70
31330
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Code for decoding protocol buffer primitives. This code is very similar to encoder.py -- read the docs for that module first. A "decoder" is a function with the signature: Decode(buffer, pos, end, message, field_dict) The arguments are: buffer: The string containing the encoded message. pos: The current position in the string. 
end: The position in the string where the current message ends. May be less than len(buffer) if we're reading a sub-message. message: The message object into which we're parsing. field_dict: message._fields (avoids a hashtable lookup). The decoder reads the field and stores it into field_dict, returning the new buffer position. A decoder for a repeated field may proactively decode all of the elements of that field, if they appear consecutively. Note that decoders may throw any of the following: IndexError: Indicates a truncated message. struct.error: Unpacking of a fixed-width field failed. message.DecodeError: Other errors. Decoders are expected to raise an exception if they are called with pos > end. This allows callers to be lax about bounds checking: it's fineto read past "end" as long as you are sure that someone else will notice and throw an exception later on. Something up the call stack is expected to catch IndexError and struct.error and convert them to message.DecodeError. Decoders are constructed using decoder constructors with the signature: MakeDecoder(field_number, is_repeated, is_packed, key, new_default) The arguments are: field_number: The field number of the field we want to decode. is_repeated: Is the field a repeated field? (bool) is_packed: Is the field a packed field? (bool) key: The key to use when looking up the field within field_dict. (This is actually the FieldDescriptor but nothing in this file should depend on that.) new_default: A function which takes a message object as a parameter and returns a new instance of the default value for this field. (This is called for repeated fields and sub-messages, when an instance does not already exist.) As with encoders, we define a decoder constructor for every type of field. Then, for every field of every message class we construct an actual decoder. That decoder goes into a dict indexed by tag, so when we decode a message we repeatedly read a tag, look up the corresponding decoder, and invoke it. 
""" __author__ = 'kenton@google.com (Kenton Varda)' import struct import six if six.PY3: long = int from google.protobuf.internal import encoder from google.protobuf.internal import wire_format from google.protobuf import message # This will overflow and thus become IEEE-754 "infinity". We would use # "float('inf')" but it doesn't work on Windows pre-Python-2.6. _POS_INF = 1e10000 _NEG_INF = -_POS_INF _NAN = _POS_INF * 0 # This is not for optimization, but rather to avoid conflicts with local # variables named "message". _DecodeError = message.DecodeError def _VarintDecoder(mask, result_type): """Return an encoder for a basic varint value (does not include tag). Decoded values will be bitwise-anded with the given mask before being returned, e.g. to limit them to 32 bits. The returned decoder does not take the usual "end" parameter -- the caller is expected to do bounds checking after the fact (often the caller can defer such checking until later). The decoder returns a (value, new_pos) pair. """ def DecodeVarint(buffer, pos): result = 0 shift = 0 while 1: b = six.indexbytes(buffer, pos) result |= ((b & 0x7f) << shift) pos += 1 if not (b & 0x80): result &= mask result = result_type(result) return (result, pos) shift += 7 if shift >= 64: raise _DecodeError('Too many bytes when decoding varint.') return DecodeVarint def _SignedVarintDecoder(mask, result_type): """Like _VarintDecoder() but decodes signed values.""" def DecodeVarint(buffer, pos): result = 0 shift = 0 while 1: b = six.indexbytes(buffer, pos) result |= ((b & 0x7f) << shift) pos += 1 if not (b & 0x80): if result > 0x7fffffffffffffff: result -= (1 << 64) result |= ~mask else: result &= mask result = result_type(result) return (result, pos) shift += 7 if shift >= 64: raise _DecodeError('Too many bytes when decoding varint.') return DecodeVarint # We force 32-bit values to int and 64-bit values to long to make # alternate implementations where the distinction is more significant # (e.g. 
the C++ implementation) simpler. _DecodeVarint = _VarintDecoder((1 << 64) - 1, long) _DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, long) # Use these versions for values which must be limited to 32 bits. _DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) _DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1, int) def ReadTag(buffer, pos): """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. We return the raw bytes of the tag rather than decoding them. The raw bytes can then be used to look up the proper decoder. This effectively allows us to trade some work that would be done in pure-python (decoding a varint) for work that is done in C (searching for a byte string in a hash table). In a low-level language it would be much cheaper to decode the varint and use that, but not in Python. """ start = pos while six.indexbytes(buffer, pos) & 0x80: pos += 1 pos += 1 return (buffer[start:pos], pos) # -------------------------------------------------------------------- def _SimpleDecoder(wire_type, decode_value): """Return a constructor for a decoder for fields of a particular type. Args: wire_type: The field's wire type. decode_value: A function which decodes an individual value, e.g. _DecodeVarint() """ def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default): if is_packed: local_DecodeVarint = _DecodeVarint def DecodePackedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) (endpoint, pos) = local_DecodeVarint(buffer, pos) endpoint += pos if endpoint > end: raise _DecodeError('Truncated message.') while pos < endpoint: (element, pos) = decode_value(buffer, pos) value.append(element) if pos > endpoint: del value[-1] # Discard corrupt value. 
raise _DecodeError('Packed element was truncated.') return pos return DecodePackedField elif is_repeated: tag_bytes = encoder.TagBytes(field_number, wire_type) tag_len = len(tag_bytes) def DecodeRepeatedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: (element, new_pos) = decode_value(buffer, pos) value.append(element) # Predict that the next tag is another copy of the same repeated # field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos >= end: # Prediction failed. Return. if new_pos > end: raise _DecodeError('Truncated message.') return new_pos return DecodeRepeatedField else: def DecodeField(buffer, pos, end, message, field_dict): (field_dict[key], pos) = decode_value(buffer, pos) if pos > end: del field_dict[key] # Discard corrupt value. raise _DecodeError('Truncated message.') return pos return DecodeField return SpecificDecoder def _ModifiedDecoder(wire_type, decode_value, modify_value): """Like SimpleDecoder but additionally invokes modify_value on every value before storing it. Usually modify_value is ZigZagDecode. """ # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but # not enough to make a significant difference. def InnerDecode(buffer, pos): (result, new_pos) = decode_value(buffer, pos) return (modify_value(result), new_pos) return _SimpleDecoder(wire_type, InnerDecode) def _StructPackDecoder(wire_type, format): """Return a constructor for a decoder for a fixed-width field. Args: wire_type: The field's wire type. format: The format string to pass to struct.unpack(). """ value_size = struct.calcsize(format) local_unpack = struct.unpack # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but # not enough to make a significant difference. 
# Note that we expect someone up-stack to catch struct.error and convert # it to _DecodeError -- this way we don't have to set up exception- # handling blocks every time we parse one value. def InnerDecode(buffer, pos): new_pos = pos + value_size result = local_unpack(format, buffer[pos:new_pos])[0] return (result, new_pos) return _SimpleDecoder(wire_type, InnerDecode) def _FloatDecoder(): """Returns a decoder for a float field. This code works around a bug in struct.unpack for non-finite 32-bit floating-point values. """ local_unpack = struct.unpack def InnerDecode(buffer, pos): # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand. new_pos = pos + 4 float_bytes = buffer[pos:new_pos] # If this value has all its exponent bits set, then it's non-finite. # In Python 2.4, struct.unpack will convert it to a finite 64-bit value. # To avoid that, we parse it specially. if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'): # If at least one significand bit is set... if float_bytes[0:3] != b'\x00\x00\x80': return (_NAN, new_pos) # If sign bit is set... if float_bytes[3:4] == b'\xFF': return (_NEG_INF, new_pos) return (_POS_INF, new_pos) # Note that we expect someone up-stack to catch struct.error and convert # it to _DecodeError -- this way we don't have to set up exception- # handling blocks every time we parse one value. result = local_unpack('<f', float_bytes)[0] return (result, new_pos) return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode) def _DoubleDecoder(): """Returns a decoder for a double field. This code works around a bug in struct.unpack for not-a-number. """ local_unpack = struct.unpack def InnerDecode(buffer, pos): # We expect a 64-bit value in little-endian byte order. Bit 1 is the sign # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand. 
new_pos = pos + 8 double_bytes = buffer[pos:new_pos] # If this value has all its exponent bits set and at least one significand # bit set, it's not a number. In Python 2.4, struct.unpack will treat it # as inf or -inf. To avoid that, we treat it specially. if ((double_bytes[7:8] in b'\x7F\xFF') and (double_bytes[6:7] >= b'\xF0') and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')): return (_NAN, new_pos) # Note that we expect someone up-stack to catch struct.error and convert # it to _DecodeError -- this way we don't have to set up exception- # handling blocks every time we parse one value. result = local_unpack('<d', double_bytes)[0] return (result, new_pos) return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode) def EnumDecoder(field_number, is_repeated, is_packed, key, new_default): enum_type = key.enum_type if is_packed: local_DecodeVarint = _DecodeVarint def DecodePackedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) (endpoint, pos) = local_DecodeVarint(buffer, pos) endpoint += pos if endpoint > end: raise _DecodeError('Truncated message.') while pos < endpoint: value_start_pos = pos (element, pos) = _DecodeSignedVarint32(buffer, pos) if element in enum_type.values_by_number: value.append(element) else: if not message._unknown_fields: message._unknown_fields = [] tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) message._unknown_fields.append( (tag_bytes, buffer[value_start_pos:pos])) if pos > endpoint: if element in enum_type.values_by_number: del value[-1] # Discard corrupt value. 
else: del message._unknown_fields[-1] raise _DecodeError('Packed element was truncated.') return pos return DecodePackedField elif is_repeated: tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) tag_len = len(tag_bytes) def DecodeRepeatedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: (element, new_pos) = _DecodeSignedVarint32(buffer, pos) if element in enum_type.values_by_number: value.append(element) else: if not message._unknown_fields: message._unknown_fields = [] message._unknown_fields.append( (tag_bytes, buffer[pos:new_pos])) # Predict that the next tag is another copy of the same repeated # field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos >= end: # Prediction failed. Return. if new_pos > end: raise _DecodeError('Truncated message.') return new_pos return DecodeRepeatedField else: def DecodeField(buffer, pos, end, message, field_dict): value_start_pos = pos (enum_value, pos) = _DecodeSignedVarint32(buffer, pos) if pos > end: raise _DecodeError('Truncated message.') if enum_value in enum_type.values_by_number: field_dict[key] = enum_value else: if not message._unknown_fields: message._unknown_fields = [] tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) message._unknown_fields.append( (tag_bytes, buffer[value_start_pos:pos])) return pos return DecodeField # -------------------------------------------------------------------- Int32Decoder = _SimpleDecoder( wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32) Int64Decoder = _SimpleDecoder( wire_format.WIRETYPE_VARINT, _DecodeSignedVarint) UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32) UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint) SInt32Decoder = _ModifiedDecoder( wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode) SInt64Decoder = _ModifiedDecoder( 
wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode) # Note that Python conveniently guarantees that when using the '<' prefix on # formats, they will also have the same size across all platforms (as opposed # to without the prefix, where their sizes depend on the C compiler's basic # type sizes). Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I') Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q') SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i') SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q') FloatDecoder = _FloatDecoder() DoubleDecoder = _DoubleDecoder() BoolDecoder = _ModifiedDecoder( wire_format.WIRETYPE_VARINT, _DecodeVarint, bool) def StringDecoder(field_number, is_repeated, is_packed, key, new_default): """Returns a decoder for a string field.""" local_DecodeVarint = _DecodeVarint local_unicode = six.text_type def _ConvertToUnicode(byte_str): try: return local_unicode(byte_str, 'utf-8') except UnicodeDecodeError as e: # add more information to the error message and re-raise it. e.reason = '%s in field: %s' % (e, key.full_name) raise assert not is_packed if is_repeated: tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) tag_len = len(tag_bytes) def DecodeRepeatedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated string.') value.append(_ConvertToUnicode(buffer[pos:new_pos])) # Predict that the next tag is another copy of the same repeated field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos == end: # Prediction failed. Return. 
return new_pos return DecodeRepeatedField else: def DecodeField(buffer, pos, end, message, field_dict): (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated string.') field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos]) return new_pos return DecodeField def BytesDecoder(field_number, is_repeated, is_packed, key, new_default): """Returns a decoder for a bytes field.""" local_DecodeVarint = _DecodeVarint assert not is_packed if is_repeated: tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) tag_len = len(tag_bytes) def DecodeRepeatedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated string.') value.append(buffer[pos:new_pos]) # Predict that the next tag is another copy of the same repeated field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos == end: # Prediction failed. Return. 
return new_pos return DecodeRepeatedField else: def DecodeField(buffer, pos, end, message, field_dict): (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated string.') field_dict[key] = buffer[pos:new_pos] return new_pos return DecodeField def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): """Returns a decoder for a group field.""" end_tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_END_GROUP) end_tag_len = len(end_tag_bytes) assert not is_packed if is_repeated: tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_START_GROUP) tag_len = len(tag_bytes) def DecodeRepeatedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) # Read sub-message. pos = value.add()._InternalParse(buffer, pos, end) # Read end tag. new_pos = pos+end_tag_len if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: raise _DecodeError('Missing group end tag.') # Predict that the next tag is another copy of the same repeated field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos == end: # Prediction failed. Return. return new_pos return DecodeRepeatedField else: def DecodeField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) # Read sub-message. pos = value._InternalParse(buffer, pos, end) # Read end tag. 
new_pos = pos+end_tag_len if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: raise _DecodeError('Missing group end tag.') return new_pos return DecodeField def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): """Returns a decoder for a message field.""" local_DecodeVarint = _DecodeVarint assert not is_packed if is_repeated: tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) tag_len = len(tag_bytes) def DecodeRepeatedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: # Read length. (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated message.') # Read sub-message. if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: # The only reason _InternalParse would return early is if it # encountered an end-group tag. raise _DecodeError('Unexpected end-group tag.') # Predict that the next tag is another copy of the same repeated field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos == end: # Prediction failed. Return. return new_pos return DecodeRepeatedField else: def DecodeField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) # Read length. (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated message.') # Read sub-message. if value._InternalParse(buffer, pos, new_pos) != new_pos: # The only reason _InternalParse would return early is if it encountered # an end-group tag. 
raise _DecodeError('Unexpected end-group tag.') return new_pos return DecodeField # -------------------------------------------------------------------- MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) def MessageSetItemDecoder(extensions_by_number): """Returns a decoder for a MessageSet item. The parameter is the _extensions_by_number map for the message class. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } } """ type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) local_ReadTag = ReadTag local_DecodeVarint = _DecodeVarint local_SkipField = SkipField def DecodeItem(buffer, pos, end, message, field_dict): message_set_item_start = pos type_id = -1 message_start = -1 message_end = -1 # Technically, type_id and message can appear in any order, so we need # a little loop here. 
while 1: (tag_bytes, pos) = local_ReadTag(buffer, pos) if tag_bytes == type_id_tag_bytes: (type_id, pos) = local_DecodeVarint(buffer, pos) elif tag_bytes == message_tag_bytes: (size, message_start) = local_DecodeVarint(buffer, pos) pos = message_end = message_start + size elif tag_bytes == item_end_tag_bytes: break else: pos = SkipField(buffer, pos, end, tag_bytes) if pos == -1: raise _DecodeError('Missing group end tag.') if pos > end: raise _DecodeError('Truncated message.') if type_id == -1: raise _DecodeError('MessageSet item missing type_id.') if message_start == -1: raise _DecodeError('MessageSet item missing message.') extension = extensions_by_number.get(type_id) if extension is not None: value = field_dict.get(extension) if value is None: value = field_dict.setdefault( extension, extension.message_type._concrete_class()) if value._InternalParse(buffer, message_start,message_end) != message_end: # The only reason _InternalParse would return early is if it encountered # an end-group tag. raise _DecodeError('Unexpected end-group tag.') else: if not message._unknown_fields: message._unknown_fields = [] message._unknown_fields.append((MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos])) return pos return DecodeItem # -------------------------------------------------------------------- def MapDecoder(field_descriptor, new_default, is_message_map): """Returns a decoder for a map field.""" key = field_descriptor tag_bytes = encoder.TagBytes(field_descriptor.number, wire_format.WIRETYPE_LENGTH_DELIMITED) tag_len = len(tag_bytes) local_DecodeVarint = _DecodeVarint # Can't read _concrete_class yet; might not be initialized. message_type = field_descriptor.message_type def DecodeMap(buffer, pos, end, message, field_dict): submsg = message_type._concrete_class() value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: # Read length. 
(size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated message.') # Read sub-message. submsg.Clear() if submsg._InternalParse(buffer, pos, new_pos) != new_pos: # The only reason _InternalParse would return early is if it # encountered an end-group tag. raise _DecodeError('Unexpected end-group tag.') if is_message_map: value[submsg.key].MergeFrom(submsg.value) else: value[submsg.key] = submsg.value # Predict that the next tag is another copy of the same repeated field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos == end: # Prediction failed. Return. return new_pos return DecodeMap # -------------------------------------------------------------------- # Optimization is not as heavy here because calls to SkipField() are rare, # except for handling end-group tags. def _SkipVarint(buffer, pos, end): """Skip a varint value. Returns the new position.""" # Previously ord(buffer[pos]) raised IndexError when pos is out of range. # With this code, ord(b'') raises TypeError. Both are handled in # python_message.py to generate a 'Truncated message' error. while ord(buffer[pos:pos+1]) & 0x80: pos += 1 pos += 1 if pos > end: raise _DecodeError('Truncated message.') return pos def _SkipFixed64(buffer, pos, end): """Skip a fixed64 value. Returns the new position.""" pos += 8 if pos > end: raise _DecodeError('Truncated message.') return pos def _SkipLengthDelimited(buffer, pos, end): """Skip a length-delimited value. Returns the new position.""" (size, pos) = _DecodeVarint(buffer, pos) pos += size if pos > end: raise _DecodeError('Truncated message.') return pos def _SkipGroup(buffer, pos, end): """Skip sub-group. 
Returns the new position.""" while 1: (tag_bytes, pos) = ReadTag(buffer, pos) new_pos = SkipField(buffer, pos, end, tag_bytes) if new_pos == -1: return pos pos = new_pos def _EndGroup(buffer, pos, end): """Skipping an END_GROUP tag returns -1 to tell the parent loop to break.""" return -1 def _SkipFixed32(buffer, pos, end): """Skip a fixed32 value. Returns the new position.""" pos += 4 if pos > end: raise _DecodeError('Truncated message.') return pos def _RaiseInvalidWireType(buffer, pos, end): """Skip function for unknown wire types. Raises an exception.""" raise _DecodeError('Tag had invalid wire type.') def _FieldSkipper(): """Constructs the SkipField function.""" WIRETYPE_TO_SKIPPER = [ _SkipVarint, _SkipFixed64, _SkipLengthDelimited, _SkipGroup, _EndGroup, _SkipFixed32, _RaiseInvalidWireType, _RaiseInvalidWireType, ] wiretype_mask = wire_format.TAG_TYPE_MASK def SkipField(buffer, pos, end, tag_bytes): """Skips a field with the specified tag. |pos| should point to the byte immediately after the tag. Returns: The new position (after the tag value), or -1 if the tag is an end-group tag (in which case the calling loop should break). """ # The wire type is always in the first byte since varints are little-endian. wire_type = ord(tag_bytes[0:1]) & wiretype_mask return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end) return SkipField SkipField = _FieldSkipper()
gpl-3.0