repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
xin3liang/platform_external_chromium_org
tools/telemetry/telemetry/unittest/progress_reporter_unittest.py
33
1548
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from telemetry.unittest import progress_reporter


class TestFoo(unittest.TestCase):
  # Test method doesn't have test- prefix intentionally. This is so that
  # run_test script won't run this test.
  def RunPassingTest(self):
    pass

  def RunFailingTest(self):
    self.fail('expected failure')


class LoggingProgressReporter(object):
  """Stand-in reporter that records the name of every method invoked on it."""

  def __init__(self):
    self._call_log = []

  @property
  def call_log(self):
    return tuple(self._call_log)

  def __getattr__(self, name):
    # Any unknown attribute behaves as a no-op method that logs its own name,
    # so the reporter accepts the full progress-reporter interface.
    def _record(*_):
      self._call_log.append(name)
    return _record


class ProgressReporterTest(unittest.TestCase):
  def testTestRunner(self):
    suite = progress_reporter.TestSuite()
    # One passing and one failing test, run in this order.
    for method_name in ('RunPassingTest', 'RunFailingTest'):
      suite.addTest(TestFoo(methodName=method_name))

    reporter = LoggingProgressReporter()
    runner = progress_reporter.TestRunner()
    result = runner.run(suite, (reporter,), 1, None)

    self.assertEqual(len(result.successes), 1)
    self.assertEqual(len(result.failures), 1)
    self.assertEqual(len(result.failures_and_errors), 1)

    expected = (
        'StartTestRun', 'StartTestSuite',
        'StartTest', 'Success', 'StopTest',
        'StartTest', 'Failure', 'StopTest',
        'StopTestSuite', 'StopTestRun',
    )
    self.assertEqual(reporter.call_log, expected)
bsd-3-clause
bbc/kamaelia
Code/Python/Kamaelia/Kamaelia/UI/OpenGL/SkyGrassBackground.py
12
2597
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://www.kamaelia.org/AUTHORS - please extend this file,
#     not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
======================
Sky & Grass background
======================

A very simple component showing a plane with the upper half coloured
light blue and the lower half green. Can be used for a background.

This component is a subclass of OpenGLComponent and therefore uses the
OpenGL display service.

Example Usage
-------------
Only a background::

    SkyGrassBackground(size=(5000,5000,0), position=(0,0,-100)).activate()
    Axon.Scheduler.scheduler.run.runThreads()
"""


import Axon
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *

from OpenGLComponent import *


class SkyGrassBackground(OpenGLComponent):
    """\
    SkyGrassBackground(...) -> A new SkyGrassBackground component.

    A very simple component showing a plane with the upper half coloured
    light blue and the lower half green. Can be used for a background.
    """

    def setup(self):
        # Cache half extents once; draw() renders the plane symmetrically
        # around the component origin.
        self.w = self.size.x / 2.0
        self.h = self.size.y / 2.0

    def draw(self):
        w, h = self.w, self.h
        # (RGBA colour, corner list) for the sky (upper) and grass (lower)
        # quads; corners are given in the same winding order for both.
        quads = (
            ((0.85, 0.85, 1.0, 1.0), ((-w, h), (w, h), (w, 0.0), (-w, 0.0))),
            ((0.75, 1.0, 0.75, 1.0), ((-w, 0.0), (w, 0.0), (w, -h), (-w, -h))),
        )
        glBegin(GL_QUADS)
        for colour, corners in quads:
            glColor4f(*colour)
            for x, y in corners:
                glVertex3f(x, y, 0)
        glEnd()


__kamaelia_components__ = (SkyGrassBackground,)


if __name__ == '__main__':
    SkyGrassBackground(size=(5000, 5000, 0), position=(0, 0, -100)).activate()
    Axon.Scheduler.scheduler.run.runThreads()

# Licensed to the BBC under a Contributor Agreement: THF
apache-2.0
hynnet/openwrt-mt7620
staging_dir/host/lib/python2.7/cProfile.py
169
6515
#! /usr/bin/env python

"""Python interface for the 'lsprof' profiler.
   Compatible with the 'profile' module.
"""

__all__ = ["run", "runctx", "help", "Profile"]

import _lsprof

# ____________________________________________________________
# Simple interface

def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name.  In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    prof = Profile()
    result = None
    try:
        try:
            prof = prof.run(statement)
        except SystemExit:
            # A profiled script may call sys.exit(); still report its stats.
            pass
    finally:
        # Stats are emitted even when the statement raised.
        if filename is not None:
            prof.dump_stats(filename)
        else:
            result = prof.print_stats(sort)
    return result

def runctx(statement, globals, locals, filename=None, sort=-1):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    prof = Profile()
    result = None
    try:
        try:
            prof = prof.runctx(statement, globals, locals)
        except SystemExit:
            # See run(): swallow sys.exit() from the profiled statement.
            pass
    finally:
        if filename is not None:
            prof.dump_stats(filename)
        else:
            result = prof.print_stats(sort)
    return result

# Backwards compatibility.
def help():
    print "Documentation for the profile/cProfile modules can be found "
    print "in the Python Library Reference, section 'The Python Profiler'."

# ____________________________________________________________

class Profile(_lsprof.Profiler):
    """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)

    Builds a profiler object using the specified timer function.
    The default timer is a fast built-in one based on real time.
    For custom timer functions returning integers, time_unit can
    be a float specifying a scale (i.e. how long each integer unit
    is, in seconds).
    """

    # Most of the functionality is in the base class.
    # This subclass only adds convenient and backward-compatible methods.

    def print_stats(self, sort=-1):
        # Render accumulated stats to stdout via pstats; always returns None.
        import pstats
        pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()

    def dump_stats(self, file):
        # Serialize the stats dict to `file` (a path) in marshal format,
        # as expected by pstats.Stats.
        import marshal
        f = open(file, 'wb')
        self.create_stats()
        marshal.dump(self.stats, f)
        f.close()

    def create_stats(self):
        # Stop profiling and convert the raw entries into self.stats.
        self.disable()
        self.snapshot_stats()

    def snapshot_stats(self):
        # Convert _lsprof's entry objects into the pstats-compatible mapping:
        # {func_label: (cc, nc, tt, ct, callers_dict)}.
        entries = self.getstats()
        self.stats = {}
        callersdicts = {}
        # call information
        for entry in entries:
            func = label(entry.code)
            nc = entry.callcount         # ncalls column of pstats (before '/')
            cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
            tt = entry.inlinetime        # tottime column of pstats
            ct = entry.totaltime         # cumtime column of pstats
            callers = {}
            # Keyed by id(code) so subcall entries below can find the dict
            # belonging to the callee.
            callersdicts[id(entry.code)] = callers
            self.stats[func] = cc, nc, tt, ct, callers
        # subcall information
        for entry in entries:
            if entry.calls:
                func = label(entry.code)
                for subentry in entry.calls:
                    try:
                        callers = callersdicts[id(subentry.code)]
                    except KeyError:
                        # Callee not present in the top-level entries; skip.
                        continue
                    nc = subentry.callcount
                    cc = nc - subentry.reccallcount
                    tt = subentry.inlinetime
                    ct = subentry.totaltime
                    if func in callers:
                        # Same caller/callee pair seen before: accumulate.
                        prev = callers[func]
                        nc += prev[0]
                        cc += prev[1]
                        tt += prev[2]
                        ct += prev[3]
                    callers[func] = nc, cc, tt, ct

    # The following two methods can be called by clients to use
    # a profiler to profile a statement, given as a string.

    def run(self, cmd):
        # Profile `cmd` in the __main__ namespace; returns self (fluent).
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals, locals):
        # Profile `cmd` executed in the supplied namespaces; returns self.
        self.enable()
        try:
            exec cmd in globals, locals
        finally:
            # Always stop the profiler, even if cmd raised.
            self.disable()
        return self

    # This method is more useful to profile a single function call.
    def runcall(self, func, *args, **kw):
        # Profile one call of `func`, returning its result unchanged.
        self.enable()
        try:
            return func(*args, **kw)
        finally:
            self.disable()

# ____________________________________________________________

def label(code):
    # Map a code object (or builtin name string) to the (file, line, name)
    # triple used as a pstats key.
    if isinstance(code, str):
        return ('~', 0, code)    # built-in functions ('~' sorts at the end)
    else:
        return (code.co_filename, code.co_firstlineno, code.co_name)

# ____________________________________________________________

def main():
    # Command-line entry point: profile a script file, optionally saving
    # or sorting the report.
    import os, sys
    from optparse import OptionParser
    usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
        help="Save stats to <outfile>", default=None)
    parser.add_option('-s', '--sort', dest="sort",
        help="Sort order when printing to stdout, based on pstats.Stats class",
        default=-1)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    # Remaining args become the profiled script's own argv.
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        # Let the script import modules from its own directory.
        sys.path.insert(0, os.path.dirname(progname))
        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        # Minimal globals mimicking a normal "python script.py" run.
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }
        runctx(code, globs, None, options.outfile, options.sort)
    else:
        parser.print_usage()
    return parser

# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
    main()
gpl-2.0
hbwzhsh/scrapy
scrapy/link.py
56
1253
""" This module defines the Link object used in Link extractors. For actual link extractors implementation see scrapy.linkextractors, or its documentation in: docs/topics/link-extractors.rst """ import six class Link(object): """Link objects represent an extracted link by the LinkExtractor.""" __slots__ = ['url', 'text', 'fragment', 'nofollow'] def __init__(self, url, text='', fragment='', nofollow=False): if isinstance(url, six.text_type): import warnings warnings.warn("Do not instantiate Link objects with unicode urls. " "Assuming utf-8 encoding (which could be wrong)") url = url.encode('utf-8') self.url = url self.text = text self.fragment = fragment self.nofollow = nofollow def __eq__(self, other): return self.url == other.url and self.text == other.text and \ self.fragment == other.fragment and self.nofollow == other.nofollow def __hash__(self): return hash(self.url) ^ hash(self.text) ^ hash(self.fragment) ^ hash(self.nofollow) def __repr__(self): return 'Link(url=%r, text=%r, fragment=%r, nofollow=%r)' % \ (self.url, self.text, self.fragment, self.nofollow)
bsd-3-clause
wndhydrnt/airflow
tests/impersonation.py
15
4984
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function

import errno
import os
import subprocess
import unittest

import logging

from airflow import jobs, models
from airflow.utils.state import State
from airflow.utils.timezone import datetime

DEV_NULL = '/dev/null'
# DAG fixtures for these tests live in tests/dags next to this file.
TEST_DAG_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
# Unix account created/removed around each test to run tasks as.
TEST_USER = 'airflow_test_user'


logger = logging.getLogger(__name__)

# TODO(aoen): Adding/remove a user as part of a test is very bad (especially if the user
# already existed to begin with on the OS), this logic should be moved into a test
# that is wrapped in a container like docker so that the user can be safely added/removed.
# When this is done we can also modify the sudoers file to ensure that useradd will work
# without any manual modification of the sudoers file by the agent that is running these
# tests.


class ImpersonationTest(unittest.TestCase):
    """End-to-end tests that backfilled tasks run as the expected unix user."""

    def setUp(self):
        # Load the test DAGs only; example DAGs would slow these tests down.
        self.dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        logger.info('Loaded DAGS:')
        logger.info(self.dagbag.dagbag_report())

        try:
            # Create the impersonation target user with the current group id
            # so it can read/write the test workspace.
            subprocess.check_output(['sudo', 'useradd', '-m', TEST_USER, '-g',
                                     str(os.getegid())])
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise unittest.SkipTest(
                    "The 'useradd' command did not exist so unable to test "
                    "impersonation; Skipping Test. These tests can only be run on a "
                    "linux host that supports 'useradd'."
                )
            else:
                # NOTE(review): check_output raises CalledProcessError (not
                # OSError) for a non-zero exit, so this branch may only cover
                # other exec failures — verify against the intended behavior.
                raise unittest.SkipTest(
                    "The 'useradd' command exited non-zero; Skipping tests. Does the "
                    "current user have permission to run 'useradd' without a password "
                    "prompt (check sudoers file)?"
                )

    def tearDown(self):
        # Remove the test user (and its home dir, -r) created in setUp.
        subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])

    def run_backfill(self, dag_id, task_id):
        # Backfill a single day of `dag_id` and assert `task_id` succeeded.
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()

        jobs.BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE).run()

        ti = models.TaskInstance(
            task=dag.get_task(task_id),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)

    def test_impersonation(self):
        """
        Tests that impersonating a unix user works
        """
        self.run_backfill(
            'test_impersonation',
            'test_impersonated_user'
        )

    def test_no_impersonation(self):
        """
        If default_impersonation=None, tests that the job is run
        as the current user (which will be a sudoer)
        """
        self.run_backfill(
            'test_no_impersonation',
            'test_superuser',
        )

    def test_default_impersonation(self):
        """
        If default_impersonation=TEST_USER, tests that the job defaults
        to running as TEST_USER for a test without run_as_user set
        """
        os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION'] = TEST_USER
        try:
            self.run_backfill(
                'test_default_impersonation',
                'test_deelevated_user'
            )
        finally:
            # Always restore the environment so later tests are unaffected.
            del os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION']

    def test_impersonation_custom(self):
        """
        Tests that impersonation using a unix user works with custom packages in
        PYTHONPATH
        """
        # PYTHONPATH is already set in script triggering tests
        assert 'PYTHONPATH' in os.environ

        self.run_backfill(
            'impersonation_with_custom_pkg',
            'exec_python_fn'
        )

    def test_impersonation_subdag(self):
        """
        Tests that impersonation using a subdag correctly passes the right configuration
        :return:
        """
        self.run_backfill(
            'impersonation_subdag',
            'test_subdag_operation'
        )
apache-2.0
czgu/metaHack
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py
214
4109
## Base Exceptions

class HTTPError(Exception):
    """Base exception used by this module."""
    pass


class HTTPWarning(Warning):
    """Base warning used by this module."""
    pass


class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes: drop the unpicklable pool reference.
        return self.__class__, (None, None)


class RequestError(PoolError):
    """Base exception for PoolErrors that have associated URLs."""

    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling purposes: keep the URL, drop the pool.
        return self.__class__, (None, self.url, None)


class SSLError(HTTPError):
    """Raised when SSL certificate fails in an HTTPS connection."""
    pass


class ProxyError(HTTPError):
    """Raised when the connection to a proxy fails."""
    pass


class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""
    pass


class ProtocolError(HTTPError):
    """Raised when something unexpected happens mid-request/response."""
    pass


#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError


## Leaf Exceptions

class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error
    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
        RequestError.__init__(self, pool, url, message)


class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        self.retries = retries
        RequestError.__init__(
            self, pool, url, "Tried to open a foreign host with url: %s" % url)


class TimeoutStateError(HTTPError):
    """Raised when passing an invalid state to a timeout."""
    pass


class TimeoutError(HTTPError):
    """Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass


class ReadTimeoutError(TimeoutError, RequestError):
    """Raised when a socket timeout occurs while receiving data from a server."""
    pass


# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    """Raised when a socket timeout occurs while connecting to a server."""
    pass


class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""
    pass


class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""
    pass


class LocationValueError(ValueError, HTTPError):
    """Raised when there is something wrong with a given URL input."""
    pass


class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        self.location = location
        # Initialise via HTTPError directly (bypassing ValueError) to keep
        # the historical single-argument message format.
        HTTPError.__init__(self, "Failed to parse: %s" % location)


class ResponseError(HTTPError):
    """Used as a container for an error reason supplied in a MaxRetryError."""
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'


class SecurityWarning(HTTPWarning):
    """Warned when performing security reducing actions."""
    pass


class InsecureRequestWarning(SecurityWarning):
    """Warned when making an unverified HTTPS request."""
    pass


class SystemTimeWarning(SecurityWarning):
    """Warned when system time is suspected to be wrong."""
    pass
apache-2.0
acfogarty/espressopp
contrib/mpi4py/mpi4py-2.0.0/test/test_cco_buf.py
8
29323
from mpi4py import MPI import mpiunittest as unittest import arrayimpl from functools import reduce prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start) def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 class BaseTestCCOBuf(object): COMM = MPI.COMM_NULL def testBarrier(self): self.COMM.Barrier() def testBcast(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): if rank == root: buf = array(root, typecode, root) else: buf = array( -1, typecode, root) self.COMM.Bcast(buf.as_mpi(), root=root) for value in buf: self.assertEqual(value, root) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): sbuf = array(root, typecode, root+1) if rank == root: rbuf = array(-1, typecode, (size,root+1)) else: rbuf = array([], typecode) self.COMM.Gather(sbuf.as_mpi(), rbuf.as_mpi(), root=root) if rank == root: for value in rbuf.flat: self.assertEqual(value, root) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): rbuf = array(-1, typecode, size) if rank == root: sbuf = array(root, typecode, (size, size)) else: sbuf = array([], typecode) self.COMM.Scatter(sbuf.as_mpi(), rbuf.as_mpi(), root=root) for value in rbuf: self.assertEqual(value, root) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): sbuf = array(root, typecode, root+1) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Allgather(sbuf.as_mpi(), rbuf.as_mpi()) for value in rbuf.flat: 
self.assertEqual(value, root) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): sbuf = array(root, typecode, (size, root+1)) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Alltoall(sbuf.as_mpi(), rbuf.as_mpi_c(root+1)) for value in rbuf.flat: self.assertEqual(value, root) def assertAlmostEqual(self, first, second): num = float(float(second-first)) den = float(second+first)/2 or 1.0 if (abs(num/den) > 1e-2): raise self.failureException('%r != %r' % (first, second)) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): sbuf = array(range(size), typecode) rbuf = array(-1, typecode, size) self.COMM.Reduce(sbuf.as_mpi(), rbuf.as_mpi(), op, root) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if rank != root: self.assertEqual(value, -1) continue if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Allreduce(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatter(self): size = 
self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): rcnt = list(range(1,size+1)) sbuf = array([rank+1]*sum(rcnt), typecode) rbuf = array(-1, typecode, rank+1) self.COMM.Reduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), None, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) rbuf = array(-1, typecode, rank+1) self.COMM.Reduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), rcnt, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): for rcnt in range(1,size): sbuf = array([rank]*rcnt*size, typecode) rbuf = array(-1, typecode, rcnt) if op == MPI.PROD: sbuf = array([rank+1]*rcnt*size, typecode) self.COMM.Reduce_scatter_block(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) v_sum = (size*(size-1))/2 v_prod = 1 for i in range(1,size+1): v_prod *= i v_max = size-1 v_min = 0 for i, value in enumerate(rbuf): if op == MPI.SUM: if v_sum <= max_val: self.assertAlmostEqual(value, v_sum) elif op == MPI.PROD: if v_prod <= max_val: self.assertAlmostEqual(value, v_prod) elif op == MPI.MAX: self.assertEqual(value, v_max) elif op 
== MPI.MIN: self.assertEqual(value, v_min) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Scan(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) try: self.COMM.Exscan(sbuf.as_mpi(), rbuf.as_mpi(), op) except NotImplementedError: return if rank == 1: for i, value in enumerate(rbuf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testBcastTypeIndexed(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode, datatype in arrayimpl.TypeMap.items(): for root in range(size): # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(0, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Bcast(newbuf, root=root) newtype.Free() if rank != root: for i, value in 
enumerate(buf): if (i % 2): self.assertEqual(value, -1) else: self.assertEqual(value, i) # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(1, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Bcast(newbuf, root) newtype.Free() if rank != root: for i, value in enumerate(buf): if not (i % 2): self.assertEqual(value, -1) else: self.assertEqual(value, i) class BaseTestCCOBufInplace(object): def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): count = root+3 if rank == root: sbuf = MPI.IN_PLACE buf = array(-1, typecode, (size, count)) #buf.flat[(rank*count):((rank+1)*count)] = \ # array(root, typecode, count) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = root rbuf = buf.as_mpi() else: buf = array(root, typecode, count) sbuf = buf.as_mpi() rbuf = None try: self.COMM.Gather(sbuf, rbuf, root=root) except NotImplementedError: return for value in buf.flat: self.assertEqual(value, root) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): for count in range(1, 10): if rank == root: buf = array(root, typecode, (size, count)) sbuf = buf.as_mpi() rbuf = MPI.IN_PLACE else: buf = array(-1, typecode, count) sbuf = None rbuf = buf.as_mpi() try: self.COMM.Scatter(sbuf, rbuf, root=root) except NotImplementedError: return for value in buf.flat: self.assertEqual(value, root) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for count in range(1, 10): buf = array(-1, typecode, (size, count)) #buf.flat[(rank*count):((rank+1)*count)] = \ # array(count, typecode, count) s, e = 
rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = count try: self.COMM.Allgather(MPI.IN_PLACE, buf.as_mpi()) except NotImplementedError: return for value in buf.flat: self.assertEqual(value, count) def assertAlmostEqual(self, first, second): num = float(float(second-first)) den = float(second+first)/2 or 1.0 if (abs(num/den) > 1e-2): raise self.failureException('%r != %r' % (first, second)) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for root in range(size): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): count = size if rank == root: buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() else: buf = array(range(size), typecode) buf2 = array(range(size), typecode) sbuf = buf.as_mpi() rbuf = buf2.as_mpi() try: self.COMM.Reduce(sbuf, rbuf, op, root) except NotImplementedError: return if rank == root: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() self.COMM.Allreduce(sbuf, rbuf, op) max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in 
arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): for rcnt in range(size): if op == MPI.PROD: rbuf = array([rank+1]*rcnt*size, typecode) else: rbuf = array([rank]*rcnt*size, typecode) self.COMM.Reduce_scatter_block(MPI.IN_PLACE, rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): rcnt = list(range(1, size+1)) if op == MPI.PROD: rbuf = array([rank+1]*sum(rcnt), typecode) else: rbuf = array([rank]*sum(rcnt), typecode) self.COMM.Reduce_scatter(MPI.IN_PLACE, rbuf.as_mpi(), rcnt, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt[rank]: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): buf = array(range(size), typecode) self.COMM.Scan(MPI.IN_PLACE, buf.as_mpi(), op) max_val = maxvalue(buf) for i, value in 
enumerate(buf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): buf = array(range(size), typecode) try: self.COMM.Exscan(MPI.IN_PLACE, buf.as_mpi(), op) except NotImplementedError: return if rank == 1: for i, value in enumerate(buf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) class TestReduceLocal(unittest.TestCase): def testReduceLocal(self): for array in arrayimpl.ArrayTypes: for typecode in arrayimpl.TypeMap: for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): size = 5 sbuf = array(range(1,size+1), typecode) rbuf = array(range(0,size+0), typecode) try: op.Reduce_local(sbuf.as_mpi(), rbuf.as_mpi()) except NotImplementedError: return for i, value in enumerate(rbuf): self.assertEqual(sbuf[i], i+1) if op == MPI.SUM: self.assertAlmostEqual(value, i+(i+1)) elif op == MPI.PROD: self.assertAlmostEqual(value, i*(i+1)) elif op == MPI.MAX: self.assertEqual(value, i+1) elif op == MPI.MIN: self.assertEqual(value, i) class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase): 
COMM = MPI.COMM_WORLD class TestCCOBufSelfDup(TestCCOBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCOBufWorldDup(TestCCOBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() name, version = MPI.get_vendor() if name == 'MPICH1' or name == 'LAM/MPI' or MPI.BOTTOM == MPI.IN_PLACE: del TestCCOBufInplaceSelf del TestCCOBufInplaceWorld elif name == 'Open MPI': if version < (1,8,5): del BaseTestCCOBufInplace.testScan del BaseTestCCOBufInplace.testExscan if version < (1,4,0): if MPI.Query_thread() > MPI.THREAD_SINGLE: del TestCCOBufWorldDup elif name == 'Microsoft MPI': if version <= (4,2,0): del BaseTestCCOBufInplace.testExscan if __name__ == '__main__': unittest.main()
gpl-3.0
habeanf/Open-Knesset
laws/migrations/0007_add_laws_votes_and_committee_meetings.py
15
17457
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding M2M table for field committee_meetings on 'PrivateProposal' db.create_table('laws_privateproposal_committee_meetings', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('privateproposal', models.ForeignKey(orm['laws.privateproposal'], null=False)), ('committeemeeting', models.ForeignKey(orm['committees.committeemeeting'], null=False)) )) db.create_unique('laws_privateproposal_committee_meetings', ['privateproposal_id', 'committeemeeting_id']) # Adding M2M table for field votes on 'PrivateProposal' db.create_table('laws_privateproposal_votes', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('privateproposal', models.ForeignKey(orm['laws.privateproposal'], null=False)), ('vote', models.ForeignKey(orm['laws.vote'], null=False)) )) db.create_unique('laws_privateproposal_votes', ['privateproposal_id', 'vote_id']) # Adding M2M table for field committee_meetings on 'KnessetProposal' db.create_table('laws_knessetproposal_committee_meetings', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('knessetproposal', models.ForeignKey(orm['laws.knessetproposal'], null=False)), ('committeemeeting', models.ForeignKey(orm['committees.committeemeeting'], null=False)) )) db.create_unique('laws_knessetproposal_committee_meetings', ['knessetproposal_id', 'committeemeeting_id']) # Adding M2M table for field votes on 'KnessetProposal' db.create_table('laws_knessetproposal_votes', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('knessetproposal', models.ForeignKey(orm['laws.knessetproposal'], null=False)), ('vote', models.ForeignKey(orm['laws.vote'], null=False)) )) db.create_unique('laws_knessetproposal_votes', ['knessetproposal_id', 'vote_id']) def 
backwards(self, orm): # Removing M2M table for field committee_meetings on 'PrivateProposal' db.delete_table('laws_privateproposal_committee_meetings') # Removing M2M table for field votes on 'PrivateProposal' db.delete_table('laws_privateproposal_votes') # Removing M2M table for field committee_meetings on 'KnessetProposal' db.delete_table('laws_knessetproposal_committee_meetings') # Removing M2M table for field votes on 'KnessetProposal' db.delete_table('laws_knessetproposal_votes') models = { 'committees.committee': { 'Meta': {'object_name': 'Committee'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'committees.committeemeeting': { 'Meta': {'object_name': 'CommitteeMeeting'}, 'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['committees.Committee']"}), 'date': ('django.db.models.fields.DateField', [], {}), 'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}), 'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': 
('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'laws.knessetproposal': { 'Meta': {'object_name': 'KnessetProposal'}, 'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}), 'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}), 'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}), 'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}), 'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}) }, 'laws.law': { 'Meta': {'object_name': 'Law'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}) }, 'laws.membervotingstatistics': { 'Meta': {'object_name': 'MemberVotingStatistics'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"}) }, 'laws.partyvotingstatistics': { 'Meta': {'object_name': 'PartyVotingStatistics'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"}) }, 'laws.privateproposal': { 'Meta': {'object_name': 'PrivateProposal'}, 'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}), 'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}), 'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}), 'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}), 'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 
'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}) }, 'laws.vote': { 'Meta': {'object_name': 'Vote'}, 'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'importance': ('django.db.models.fields.FloatField', [], {}), 'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'time': ('django.db.models.fields.DateTimeField', [], {}), 'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}), 'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, 'laws.voteaction': { 'Meta': {'object_name': 'VoteAction'}, 'against_coalition': ('django.db.models.fields.BooleanField', [], 
{'default': 'False', 'blank': 'True'}), 'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"}) }, 'mks.member': { 'Meta': {'object_name': 'Member'}, 'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}), 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, 'mks.membership': { 'Meta': {'object_name': 'Membership'}, 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}), 'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}), 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}) }, 'mks.party': { 'Meta': {'object_name': 'Party'}, 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 
'True', 'blank': 'True'}), 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}) }, 'planet.blog': { 'Meta': {'object_name': 'Blog'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}) }, 'tagging.tag': { 'Meta': {'object_name': 'Tag'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}) }, 'tagging.taggeditem': { 'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"}) } } complete_apps = ['laws']
bsd-3-clause
dstndstn/astrometry.net
net/urls.py
1
10681
from django.conf.urls import include, url from astrometry.net import settings # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = [] from astrometry.net.views.home import home, support, api_help, explore urlpatterns.extend([ url(r'^/?$', home), url(r'^support/?$', support, name='support'), url(r'^api_help/?$', api_help, name='api-help'), # url(r'^new_api_key/?$', 'new_api_key', name='new_api_key'), url(r'^explore/?$', explore, name='explore'), ]) if settings.ENABLE_SOCIAL: urlpatterns.append( url('', include('social.apps.django_app.urls', namespace='social')) ) if settings.ENABLE_SOCIAL2: urlpatterns.append( url('', include('social_django.urls', namespace='social')) ) from astrometry.net.views.home import signin, signout, signedin, newuser urlpatterns.extend([ url(r'^signin/', signin, name='signin'), url(r'^signout/', signout, name='signout'), url(r'^signedin/', signedin), url(r'^newuser/', newuser), ]) from astrometry.net.views.search import images, users urlpatterns.extend([ url(r'^search/images/?$', images), url(r'^search/users/?$', users), ]) jobpattern = r'[0-9-]+' subpattern = r'[0-9-]+' imagepattern = r'[0-9-]+' idpattern = r'[0-9-]+' tagpattern = r'[\s|\S]+' from astrometry.net.views.submission import upload_file, status, job_log_file, job_log_file2, index urlpatterns.extend([ url(r'^upload/?$', upload_file, name='upload-file'), url(r'^status/(?P<subid>' + subpattern + r')/?', status, name='submission_status'), url(r'^joblog/(?P<jobid>' + jobpattern + r')/?', job_log_file, name='job_log_file'), url(r'^joblog2/(?P<jobid>' + jobpattern + r')/?', job_log_file2, name='job_log_file_2'), url(r'^submissions/(?P<user_id>' + idpattern + r')/?$', index), ]) from astrometry.net.views.user import ( index, dashboard, user_profile, dashboard_submissions, dashboard_user_images, dashboard_albums, dashboard_create_album, dashboard_profile, save_profile, user_images, user_albums, user_submissions, 
user_autocomplete) urlpatterns.extend([ url(r'^dashboard/?$', dashboard, name='dashboard'), #(r'^dashboard/apikey/?$', 'get_api_key'), # made redundant by inclusion of api key in dashboard profile url(r'^dashboard/submissions/?$', dashboard_submissions, name='dashboard_submissions'), url(r'^dashboard/images/?$', dashboard_user_images, name='dashboard_user_images'), url(r'^dashboard/albums/?$', dashboard_albums, name='dashboard_albums'), url(r'^dashboard/create_album/?$', dashboard_create_album, name='dashboard_create_album'), url(r'^dashboard/profile/?$', dashboard_profile, name='dashboard_profile'), url(r'^dashboard/profile/save/?$', save_profile, name='save_profile'), url(r'^users/?$', index, name='users'), url(r'^users/(?P<user_id>' + idpattern + r')/?$', user_profile, name='user_profile'), url(r'^users/(?P<user_id>' + idpattern + r')/images/?$', user_images, name='user_images'), url(r'^users/(?P<user_id>' + idpattern + r')/albums/?$', user_albums, name='user_albums'), url(r'^users/(?P<user_id>' + idpattern + r')/submissions/?$', user_submissions, name='user_submissions'), url(r'^users/autocomplete/?$', user_autocomplete, name='user_autocomplete'), ]) from astrometry.net.views.image import ( index, index_tag, annotated_image, grid_image, index_location, index_nearby, index_recent, index_all, index_by_user, index_user, index_album, hide, unhide, user_image, edit, search, serve_image, image_set, onthesky_image, sdss_image, galex_image, red_green_image, extraction_image, wcs_file, new_fits_file, kml_file, rdls_file, axy_file, corr_file) urlpatterns.extend([ url(r'^annotated_(?P<size>full|display)/(?P<jobid>' + jobpattern + r')/?', annotated_image, name='annotated_image'), url(r'^grid_(?P<size>full|display)/(?P<jobid>' + jobpattern + r')/?', grid_image, name='grid_image'), url(r'^user_images/?$', index, name='images'), url(r'^user_images/tag/?$', index_tag, name='images-tag'), url(r'^user_images/location/?$', index_location, name='images-location'), 
url(r'^user_images/nearby/(?P<user_image_id>' + idpattern + r')/?$', index_nearby, name='images-nearby'), url(r'^user_images/recent/?$', index_recent), url(r'^user_images/all/?$', index_all), url(r'^user_images/by_user/?$', index_by_user), url(r'^user_images/user/(?P<user_id>' + idpattern + r')/?$', index_user), url(r'^user_images/album/(?P<album_id>' + idpattern + r')/?$', index_album), url(r'^user_images/(?P<user_image_id>' + idpattern + r')/hide/?$', hide), url(r'^user_images/(?P<user_image_id>' + idpattern + r')/unhide/?$', unhide), url(r'^user_images/(?P<user_image_id>' + idpattern + r')/?$', user_image, name='user_image'), url(r'^user_images/(?P<user_image_id>' + idpattern + r')/edit/?$', edit, name='image_edit'), url(r'^user_images/search/?$', search, name='image-search'), url(r'^image/(?P<id>' + imagepattern + r')/?$', serve_image, name='serve_image'), url(r'^images/(?P<category>\w+)/(?P<id>' + idpattern + r')/?$', image_set), url(r'^sky_plot/zoom(?P<zoom>[0-3])/(?P<calid>' + idpattern + r')/?$', onthesky_image, name='onthesky_image'), url(r'^sdss_image_(?P<size>full|display)/(?P<calid>' + idpattern + r')/?$', sdss_image, name='sdss_image'), url(r'^galex_image_(?P<size>full|display)/(?P<calid>' + idpattern + r')/?$', galex_image, name='galex_image'), url(r'^red_green_image_(?P<size>full|display)/(?P<job_id>' + idpattern + r')/?$', red_green_image, name='red_green_image'), url(r'^extraction_image_(?P<size>full|display)/(?P<job_id>' + idpattern + r')/?$', extraction_image, name='extraction_image'), url(r'^wcs_file/(?P<jobid>' + idpattern + r')/?$', wcs_file, name='wcs-file'), url(r'^new_fits_file/(?P<jobid>' + idpattern + r')/?$', new_fits_file, name='new-fits-file'), url(r'^kml_file/(?P<jobid>' + idpattern + r')/?$', kml_file, name='kml-file'), url(r'^rdls_file/(?P<jobid>' + idpattern + r')/?$', rdls_file, name='rdls-file'), url(r'^axy_file/(?P<jobid>' + idpattern + r')/?$', axy_file, name='axy-file'), url(r'^corr_file/(?P<jobid>' + idpattern + r')/?$', 
corr_file, name='corr-file'), ]) # # urlpatterns += patterns('astrometry.net.views.enhance', # (r'^enhance_ui/(?P<user_image_id>' + idpattern + r')/?$', 'enhanced_ui'), # url(r'^enhanced_image_(?P<size>full|display)/(?P<job_id>' + idpattern + r')/?$', 'enhanced_image', name='enhanced_image'), # ) # from astrometry.net.views.album import album, delete as album_delete, edit, new as album_new urlpatterns.extend([ url(r'^albums/(?P<album_id>' + idpattern + r')/?$', album, name='album'), url(r'^albums/(?P<album_id>' + idpattern + r')/delete/?$', album_delete, name='album_delete'), url(r'^albums/(?P<album_id>' + idpattern + r')/edit/?$', edit, name='album_edit'), url(r'^albums/new/?$', album_new, name='album_new'), ]) from astrometry.net.views.tag import index, delete, new, tag_autocomplete urlpatterns.extend([ url(r'^tags/?$', index, name='tags'), url(r'^(?P<category>\w+)/(?P<recipient_id>' + idpattern + r')/tags/(?P<tag_id>' + tagpattern + r')/delete/?$', delete, name='tag_delete'), url(r'^(?P<category>\w+)/(?P<recipient_id>' + idpattern + r')/tags/new/?$', new, name='tag_new'), url(r'^tags/autocomplete/?$', tag_autocomplete, name='tag_autocomplete'), ]) from astrometry.net.views.flag import update_flags urlpatterns.append( url(r'^(?P<category>\w+)/(?P<recipient_id>' + idpattern + r')/flags/update/?$', update_flags, name='update_flags'), ) from astrometry.net.views.comment import new as new_comment, delete as delete_comment urlpatterns.extend([ url(r'^(?P<category>\w+)/(?P<recipient_id>' + idpattern + r')/comments/new/?$', new_comment, name='comment_new'), url(r'^comments/(?P<comment_id>' + idpattern + r')/delete/?$', delete_comment, name='comment_delete'), ]) from astrometry.net.views.license import edit urlpatterns.append( url(r'^(?P<licensable_type>\w+)/(?P<licensable_id>' + idpattern + r')/license/edit/?$', edit, name='edit_license') ) # psidpattern = r'[0-9-]+' # # urlpatterns += patterns('astrometry.net.views.admin', # (r'^admin/procsub/(?P<psid>'+psidpattern + 
r')?$', 'procsub'), # (r'^admin/?', 'index'), # ) from astrometry.net.api import ( api_login, api_upload, url_upload, api_sdss_image_for_wcs, api_galex_image_for_wcs, api_submission_images, submission_status, myjobs, job_status, calibration, tags, machine_tags, objects_in_field, annotations_in_field, job_info, jobs_by_tag) urlpatterns.extend([ url(r'^api/login/?$', api_login, name='api_login'), url(r'^api/upload/?$', api_upload, name='api_upload'), url(r'^api/url_upload/?$', url_upload, name='api_url_upload'), url(r'^api/sdss_image_for_wcs/?$', api_sdss_image_for_wcs, name='api_sdss_image_for_wcs'), url(r'^api/galex_image_for_wcs/?$', api_galex_image_for_wcs, name='api_galex_image_for_wcs'), url(r'^api/submission_images/?$', api_submission_images, name='api_submission_images'), url(r'^api/submissions/(?P<sub_id>' + idpattern + r')/?$', submission_status, name='api_submission_status'), url(r'^api/myjobs/', myjobs, name='api_myjobs'), url(r'^api/jobs/(?P<job_id>' + idpattern + r')/?$', job_status, name='api_job_status'), url(r'^api/jobs/(?P<job_id>' + idpattern + r')/calibration/?$', calibration, name='api_calibration'), url(r'^api/jobs/(?P<job_id>' + idpattern + r')/tags/?$', tags, name='api_tags'), url(r'^api/jobs/(?P<job_id>' + idpattern + r')/machine_tags/?$', machine_tags, name='api_machine_tags'), url(r'^api/jobs/(?P<job_id>' + idpattern + r')/objects_in_field/?$', objects_in_field, name='api_objects_in_field'), url(r'^api/jobs/(?P<job_id>' + idpattern + r')/annotations/?$', annotations_in_field, name='api_annotations_in_field'), url(r'^api/jobs/(?P<job_id>' + idpattern + r')/info/?$', job_info, name='api_job_info'), url(r'^api/jobs_by_tag/?$', jobs_by_tag, name='api_jobs_by_tag'), #(r'^api/logout/?', 'logout'), ]) # # # static file serving in development # if settings.DEBUG: # urlpatterns += patterns('', # (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATICFILES_DIRS[0]}), # ) # fallback from astrometry.net.views.home 
import home urlpatterns.append( url(r'', home, name='home'), )
bsd-3-clause
AlexOugh/horizon
openstack_dashboard/dashboards/admin/volumes/snapshots/tests.py
34
4219
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django import http from mox import IsA # noqa from openstack_dashboard.api import cinder from openstack_dashboard.test import helpers as test INDEX_URL = reverse('horizon:admin:volumes:index') class VolumeSnapshotsViewTests(test.BaseAdminViewTests): @test.create_stubs({cinder: ('volume_snapshot_reset_state', 'volume_snapshot_get')}) def test_update_snapshot_status(self): snapshot = self.cinder_volume_snapshots.first() state = 'error' cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id) \ .AndReturn(snapshot) cinder.volume_snapshot_reset_state(IsA(http.HttpRequest), snapshot.id, state) self.mox.ReplayAll() formData = {'status': state} url = reverse('horizon:admin:volumes:snapshots:update_status', args=(snapshot.id,)) res = self.client.post(url, formData) self.assertNoFormErrors(res) @test.create_stubs({cinder: ('volume_snapshot_get', 'volume_get')}) def test_get_volume_snapshot_details(self): volume = self.cinder_volumes.first() snapshot = self.cinder_volume_snapshots.first() cinder.volume_get(IsA(http.HttpRequest), volume.id). \ AndReturn(volume) cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id). 
\ AndReturn(snapshot) self.mox.ReplayAll() url = reverse('horizon:admin:volumes:snapshots:detail', args=[snapshot.id]) res = self.client.get(url) self.assertContains(res, "<h1>Volume Snapshot Details: %s</h1>" % snapshot.name, 1, 200) self.assertContains(res, "<dd>test snapshot</dd>", 1, 200) self.assertContains(res, "<dd>%s</dd>" % snapshot.id, 1, 200) self.assertContains(res, "<dd>Available</dd>", 1, 200) @test.create_stubs({cinder: ('volume_snapshot_get', 'volume_get')}) def test_get_volume_snapshot_details_with_snapshot_exception(self): # Test to verify redirect if get volume snapshot fails snapshot = self.cinder_volume_snapshots.first() cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id).\ AndRaise(self.exceptions.cinder) self.mox.ReplayAll() url = reverse('horizon:admin:volumes:snapshots:detail', args=[snapshot.id]) res = self.client.get(url) self.assertNoFormErrors(res) self.assertMessageCount(error=1) self.assertRedirectsNoFollow(res, INDEX_URL) @test.create_stubs({cinder: ('volume_snapshot_get', 'volume_get')}) def test_get_volume_snapshot_details_with_volume_exception(self): # Test to verify redirect if get volume fails volume = self.cinder_volumes.first() snapshot = self.cinder_volume_snapshots.first() cinder.volume_get(IsA(http.HttpRequest), volume.id). \ AndRaise(self.exceptions.cinder) cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id). \ AndReturn(snapshot) self.mox.ReplayAll() url = reverse('horizon:admin:volumes:snapshots:detail', args=[snapshot.id]) res = self.client.get(url) self.assertNoFormErrors(res) self.assertMessageCount(error=1) self.assertRedirectsNoFollow(res, INDEX_URL)
apache-2.0
deadRaccoons/TestAirlines
tabo/cherrypy/cherrypy/_cplogging.py
29
17181
""" Simple config ============= Although CherryPy uses the :mod:`Python logging module <logging>`, it does so behind the scenes so that simple logging is simple, but complicated logging is still possible. "Simple" logging means that you can log to the screen (i.e. console/stdout) or to a file, and that you can easily have separate error and access log files. Here are the simplified logging settings. You use these by adding lines to your config file or dict. You should set these at either the global level or per application (see next), but generally not both. * ``log.screen``: Set this to True to have both "error" and "access" messages printed to stdout. * ``log.access_file``: Set this to an absolute filename where you want "access" messages written. * ``log.error_file``: Set this to an absolute filename where you want "error" messages written. Many events are automatically logged; to log your own application events, call :func:`cherrypy.log`. Architecture ============ Separate scopes --------------- CherryPy provides log managers at both the global and application layers. This means you can have one set of logging rules for your entire site, and another set of rules specific to each application. The global log manager is found at :func:`cherrypy.log`, and the log manager for each application is found at :attr:`app.log<cherrypy._cptree.Application.log>`. If you're inside a request, the latter is reachable from ``cherrypy.request.app.log``; if you're outside a request, you'll have to obtain a reference to the ``app``: either the return value of :func:`tree.mount()<cherrypy._cptree.Tree.mount>` or, if you used :func:`quickstart()<cherrypy.quickstart>` instead, via ``cherrypy.tree.apps['/']``. By default, the global logs are named "cherrypy.error" and "cherrypy.access", and the application logs are named "cherrypy.error.2378745" and "cherrypy.access.2378745" (the number is the id of the Application object). 
This means that the application logs "bubble up" to the site logs, so if your application has no log handlers, the site-level handlers will still log the messages. Errors vs. Access ----------------- Each log manager handles both "access" messages (one per HTTP request) and "error" messages (everything else). Note that the "error" log is not just for errors! The format of access messages is highly formalized, but the error log isn't--it receives messages from a variety of sources (including full error tracebacks, if enabled). If you are logging the access log and error log to the same source, then there is a possibility that a specially crafted error message may replicate an access log message as described in CWE-117. In this case it is the application developer's responsibility to manually escape data before using CherryPy's log() functionality, or they may create an application that is vulnerable to CWE-117. This would be achieved by using a custom handler escape any special characters, and attached as described below. Custom Handlers =============== The simple settings above work by manipulating Python's standard :mod:`logging` module. So when you need something more complex, the full power of the standard module is yours to exploit. You can borrow or create custom handlers, formats, filters, and much more. Here's an example that skips the standard FileHandler and uses a RotatingFileHandler instead: :: #python log = app.log # Remove the default FileHandlers if present. log.error_file = "" log.access_file = "" maxBytes = getattr(log, "rot_maxBytes", 10000000) backupCount = getattr(log, "rot_backupCount", 1000) # Make a new RotatingFileHandler for the error log. fname = getattr(log, "rot_error_file", "error.log") h = handlers.RotatingFileHandler(fname, 'a', maxBytes, backupCount) h.setLevel(DEBUG) h.setFormatter(_cplogging.logfmt) log.error_log.addHandler(h) # Make a new RotatingFileHandler for the access log. 
fname = getattr(log, "rot_access_file", "access.log") h = handlers.RotatingFileHandler(fname, 'a', maxBytes, backupCount) h.setLevel(DEBUG) h.setFormatter(_cplogging.logfmt) log.access_log.addHandler(h) The ``rot_*`` attributes are pulled straight from the application log object. Since "log.*" config entries simply set attributes on the log object, you can add custom attributes to your heart's content. Note that these handlers are used ''instead'' of the default, simple handlers outlined above (so don't set the "log.error_file" config entry, for example). """ import datetime import logging # Silence the no-handlers "warning" (stderr write!) in stdlib logging logging.Logger.manager.emittedNoHandlerWarning = 1 logfmt = logging.Formatter("%(message)s") import os import sys import cherrypy from cherrypy import _cperror from cherrypy._cpcompat import ntob, py3k class NullHandler(logging.Handler): """A no-op logging handler to silence the logging.lastResort handler.""" def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None class LogManager(object): """An object to assist both simple and advanced logging. ``cherrypy.log`` is an instance of this class. """ appid = None """The id() of the Application object which owns this log manager. If this is a global log manager, appid is None.""" error_log = None """The actual :class:`logging.Logger` instance for error messages.""" access_log = None """The actual :class:`logging.Logger` instance for access messages.""" if py3k: access_log_format = \ '{h} {l} {u} {t} "{r}" {s} {b} "{f}" "{a}"' else: access_log_format = \ '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' logger_root = None """The "top-level" logger name. This string will be used as the first segment in the Logger names. 
The default is "cherrypy", for example, in which case the Logger names will be of the form:: cherrypy.error.<appid> cherrypy.access.<appid> """ def __init__(self, appid=None, logger_root="cherrypy"): self.logger_root = logger_root self.appid = appid if appid is None: self.error_log = logging.getLogger("%s.error" % logger_root) self.access_log = logging.getLogger("%s.access" % logger_root) else: self.error_log = logging.getLogger( "%s.error.%s" % (logger_root, appid)) self.access_log = logging.getLogger( "%s.access.%s" % (logger_root, appid)) self.error_log.setLevel(logging.INFO) self.access_log.setLevel(logging.INFO) # Silence the no-handlers "warning" (stderr write!) in stdlib logging self.error_log.addHandler(NullHandler()) self.access_log.addHandler(NullHandler()) cherrypy.engine.subscribe('graceful', self.reopen_files) def reopen_files(self): """Close and reopen all file handlers.""" for log in (self.error_log, self.access_log): for h in log.handlers: if isinstance(h, logging.FileHandler): h.acquire() h.stream.close() h.stream = open(h.baseFilename, h.mode) h.release() def error(self, msg='', context='', severity=logging.INFO, traceback=False): """Write the given ``msg`` to the error log. This is not just for errors! Applications may call this at any time to log application-specific information. If ``traceback`` is True, the traceback of the current exception (if any) will be appended to ``msg``. """ if traceback: msg += _cperror.format_exc() self.error_log.log(severity, ' '.join((self.time(), context, msg))) def __call__(self, *args, **kwargs): """An alias for ``error``.""" return self.error(*args, **kwargs) def access(self): """Write to the access log (in Apache/NCSA Combined Log format). See the `apache documentation <http://httpd.apache.org/docs/current/logs.html#combined>`_ for format details. CherryPy calls this automatically for you. Note there are no arguments; it collects the data itself from :class:`cherrypy.request<cherrypy._cprequest.Request>`. 
Like Apache started doing in 2.0.46, non-printable and other special characters in %r (and we expand that to all parts) are escaped using \\xhh sequences, where hh stands for the hexadecimal representation of the raw byte. Exceptions from this rule are " and \\, which are escaped by prepending a backslash, and all whitespace characters, which are written in their C-style notation (\\n, \\t, etc). """ request = cherrypy.serving.request remote = request.remote response = cherrypy.serving.response outheaders = response.headers inheaders = request.headers if response.output_status is None: status = "-" else: status = response.output_status.split(ntob(" "), 1)[0] if py3k: status = status.decode('ISO-8859-1') atoms = {'h': remote.name or remote.ip, 'l': '-', 'u': getattr(request, "login", None) or "-", 't': self.time(), 'r': request.request_line, 's': status, 'b': dict.get(outheaders, 'Content-Length', '') or "-", 'f': dict.get(inheaders, 'Referer', ''), 'a': dict.get(inheaders, 'User-Agent', ''), 'o': dict.get(inheaders, 'Host', '-'), } if py3k: for k, v in atoms.items(): if not isinstance(v, str): v = str(v) v = v.replace('"', '\\"').encode('utf8') # Fortunately, repr(str) escapes unprintable chars, \n, \t, etc # and backslash for us. All we have to do is strip the quotes. v = repr(v)[2:-1] # in python 3.0 the repr of bytes (as returned by encode) # uses double \'s. But then the logger escapes them yet, again # resulting in quadruple slashes. Remove the extra one here. v = v.replace('\\\\', '\\') # Escape double-quote. atoms[k] = v try: self.access_log.log( logging.INFO, self.access_log_format.format(**atoms)) except: self(traceback=True) else: for k, v in atoms.items(): if isinstance(v, unicode): v = v.encode('utf8') elif not isinstance(v, str): v = str(v) # Fortunately, repr(str) escapes unprintable chars, \n, \t, etc # and backslash for us. All we have to do is strip the quotes. v = repr(v)[1:-1] # Escape double-quote. 
atoms[k] = v.replace('"', '\\"') try: self.access_log.log( logging.INFO, self.access_log_format % atoms) except: self(traceback=True) def time(self): """Return now() in Apache Common Log Format (no timezone).""" now = datetime.datetime.now() monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] month = monthnames[now.month - 1].capitalize() return ('[%02d/%s/%04d:%02d:%02d:%02d]' % (now.day, month, now.year, now.hour, now.minute, now.second)) def _get_builtin_handler(self, log, key): for h in log.handlers: if getattr(h, "_cpbuiltin", None) == key: return h # ------------------------- Screen handlers ------------------------- # def _set_screen_handler(self, log, enable, stream=None): h = self._get_builtin_handler(log, "screen") if enable: if not h: if stream is None: stream = sys.stderr h = logging.StreamHandler(stream) h.setFormatter(logfmt) h._cpbuiltin = "screen" log.addHandler(h) elif h: log.handlers.remove(h) def _get_screen(self): h = self._get_builtin_handler has_h = h(self.error_log, "screen") or h(self.access_log, "screen") return bool(has_h) def _set_screen(self, newvalue): self._set_screen_handler(self.error_log, newvalue, stream=sys.stderr) self._set_screen_handler(self.access_log, newvalue, stream=sys.stdout) screen = property(_get_screen, _set_screen, doc="""Turn stderr/stdout logging on or off. If you set this to True, it'll add the appropriate StreamHandler for you. If you set it to False, it will remove the handler. 
""") # -------------------------- File handlers -------------------------- # def _add_builtin_file_handler(self, log, fname): h = logging.FileHandler(fname) h.setFormatter(logfmt) h._cpbuiltin = "file" log.addHandler(h) def _set_file_handler(self, log, filename): h = self._get_builtin_handler(log, "file") if filename: if h: if h.baseFilename != os.path.abspath(filename): h.close() log.handlers.remove(h) self._add_builtin_file_handler(log, filename) else: self._add_builtin_file_handler(log, filename) else: if h: h.close() log.handlers.remove(h) def _get_error_file(self): h = self._get_builtin_handler(self.error_log, "file") if h: return h.baseFilename return '' def _set_error_file(self, newvalue): self._set_file_handler(self.error_log, newvalue) error_file = property(_get_error_file, _set_error_file, doc="""The filename for self.error_log. If you set this to a string, it'll add the appropriate FileHandler for you. If you set it to ``None`` or ``''``, it will remove the handler. """) def _get_access_file(self): h = self._get_builtin_handler(self.access_log, "file") if h: return h.baseFilename return '' def _set_access_file(self, newvalue): self._set_file_handler(self.access_log, newvalue) access_file = property(_get_access_file, _set_access_file, doc="""The filename for self.access_log. If you set this to a string, it'll add the appropriate FileHandler for you. If you set it to ``None`` or ``''``, it will remove the handler. """) # ------------------------- WSGI handlers ------------------------- # def _set_wsgi_handler(self, log, enable): h = self._get_builtin_handler(log, "wsgi") if enable: if not h: h = WSGIErrorHandler() h.setFormatter(logfmt) h._cpbuiltin = "wsgi" log.addHandler(h) elif h: log.handlers.remove(h) def _get_wsgi(self): return bool(self._get_builtin_handler(self.error_log, "wsgi")) def _set_wsgi(self, newvalue): self._set_wsgi_handler(self.error_log, newvalue) wsgi = property(_get_wsgi, _set_wsgi, doc="""Write errors to wsgi.errors. 
If you set this to True, it'll add the appropriate :class:`WSGIErrorHandler<cherrypy._cplogging.WSGIErrorHandler>` for you (which writes errors to ``wsgi.errors``). If you set it to False, it will remove the handler. """) class WSGIErrorHandler(logging.Handler): "A handler class which writes logging records to environ['wsgi.errors']." def flush(self): """Flushes the stream.""" try: stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors') except (AttributeError, KeyError): pass else: stream.flush() def emit(self, record): """Emit a record.""" try: stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors') except (AttributeError, KeyError): pass else: try: msg = self.format(record) fs = "%s\n" import types # if no unicode support... if not hasattr(types, "UnicodeType"): stream.write(fs % msg) else: try: stream.write(fs % msg) except UnicodeError: stream.write(fs % msg.encode("UTF-8")) self.flush() except: self.handleError(record)
gpl-2.0
Nick-Hall/gramps
gramps/gen/filters/rules/person/_hasevent.py
5
2501
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2002-2006 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Filter rule to match persons with a particular event. """ #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- from ....const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from ....lib.eventroletype import EventRoleType from .._haseventbase import HasEventBase #------------------------------------------------------------------------- # # HasEvent # #------------------------------------------------------------------------- class HasEvent(HasEventBase): """Rule that checks for a person with a particular value""" labels = [ _('Personal event:'), _('Date:'), _('Place:'), _('Description:'), _('Main Participants:'), _('Primary Role:') ] name = _('People with the personal <event>') description = _("Matches people with a personal event of a particular " "value") def apply(self, dbase, person): for event_ref in person.get_event_ref_list(): if not event_ref: continue if 
int(self.list[5]) and event_ref.role != EventRoleType.PRIMARY: # Only match primaries, no witnesses continue event = dbase.get_event_from_handle(event_ref.ref) if HasEventBase.apply(self, dbase, event): return True return False
gpl-2.0
SwagColoredKitteh/servo
tests/wpt/web-platform-tests/tools/wptserve/wptserve/stash.py
125
5248
import base64 import json import os import uuid from multiprocessing.managers import BaseManager, DictProxy class ServerDictManager(BaseManager): shared_data = {} def _get_shared(): return ServerDictManager.shared_data ServerDictManager.register("get_dict", callable=_get_shared, proxytype=DictProxy) class ClientDictManager(BaseManager): pass ClientDictManager.register("get_dict") class StashServer(object): def __init__(self, address=None, authkey=None): self.address = address self.authkey = authkey self.manager = None def __enter__(self): self.manager, self.address, self.authkey = start_server(self.address, self.authkey) store_env_config(self.address, self.authkey) def __exit__(self, *args, **kwargs): if self.manager is not None: self.manager.shutdown() def load_env_config(): address, authkey = json.loads(os.environ["WPT_STASH_CONFIG"]) if isinstance(address, list): address = tuple(address) else: address = str(address) authkey = base64.decodestring(authkey) return address, authkey def store_env_config(address, authkey): authkey = base64.encodestring(authkey) os.environ["WPT_STASH_CONFIG"] = json.dumps((address, authkey)) def start_server(address=None, authkey=None): manager = ServerDictManager(address, authkey) manager.start() return (manager, manager._address, manager._authkey) #TODO: Consider expiring values after some fixed time for long-running #servers class Stash(object): """Key-value store for persisting data across HTTP/S and WS/S requests. This data store is specifically designed for persisting data across server requests. The synchronization is achieved by using the BaseManager from the multiprocessing module so different processes can acccess the same data. Stash can be used interchangeably between HTTP, HTTPS, WS and WSS servers. A thing to note about WS/S servers is that they require additional steps in the handlers for accessing the same underlying shared data in the Stash. This can usually be achieved by using load_env_config(). 
When using Stash interchangeably between HTTP/S and WS/S request, the path part of the key should be expliclitly specified if accessing the same key/value subset. The store has several unusual properties. Keys are of the form (path, uuid), where path is, by default, the path in the HTTP request and uuid is a unique id. In addition, the store is write-once, read-once, i.e. the value associated with a particular key cannot be changed once written and the read operation (called "take") is destructive. Taken together, these properties make it difficult for data to accidentally leak between different resources or different requests for the same resource. """ _proxy = None def __init__(self, default_path, address=None, authkey=None): self.default_path = default_path self.data = self._get_proxy(address, authkey) def _get_proxy(self, address=None, authkey=None): if address is None and authkey is None: Stash._proxy = {} if Stash._proxy is None: manager = ClientDictManager(address, authkey) manager.connect() Stash._proxy = manager.get_dict() return Stash._proxy def _wrap_key(self, key, path): if path is None: path = self.default_path # This key format is required to support using the path. Since the data # passed into the stash can be a DictProxy which wouldn't detect changes # when writing to a subdict. return (str(path), str(uuid.UUID(key))) def put(self, key, value, path=None): """Place a value in the shared stash. :param key: A UUID to use as the data's key. :param value: The data to store. This can be any python object. 
:param path: The path that has access to read the data (by default the current request path)""" if value is None: raise ValueError("SharedStash value may not be set to None") internal_key = self._wrap_key(key, path) if internal_key in self.data: raise StashError("Tried to overwrite existing shared stash value " "for key %s (old value was %s, new value is %s)" % (internal_key, self.data[str(internal_key)], value)) else: self.data[internal_key] = value def take(self, key, path=None): """Remove a value from the shared stash and return it. :param key: A UUID to use as the data's key. :param path: The path that has access to read the data (by default the current request path)""" internal_key = self._wrap_key(key, path) value = self.data.get(internal_key, None) if value is not None: try: self.data.pop(internal_key) except KeyError: # Silently continue when pop error occurs. pass return value class StashError(Exception): pass
mpl-2.0
lupyuen/RaspberryPiImage
home/pi/GrovePi/Software/Python/others/temboo/Library/Google/Contacts/GetContactsWithQuery.py
4
5460
# -*- coding: utf-8 -*- ############################################################################### # # GetContactsWithQuery # Retrieves the contact or contacts in that account that match a specified query term. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class GetContactsWithQuery(Choreography): def __init__(self, temboo_session): """ Create a new instance of the GetContactsWithQuery Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(GetContactsWithQuery, self).__init__(temboo_session, '/Library/Google/Contacts/GetContactsWithQuery') def new_input_set(self): return GetContactsWithQueryInputSet() def _make_result_set(self, result, path): return GetContactsWithQueryResultSet(result, path) def _make_execution(self, session, exec_id, path): return GetContactsWithQueryChoreographyExecution(session, exec_id, path) class GetContactsWithQueryInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the GetContactsWithQuery Choreo. The InputSet object is used to specify input parameters when executing this Choreo. 
""" def set_AccessToken(self, value): """ Set the value of the AccessToken input for this Choreo. ((optional, string) The access token retrieved in the last step of the OAuth process. Access tokens that are expired will be refreshed and returned in the Choreo output.) """ super(GetContactsWithQueryInputSet, self)._set_input('AccessToken', value) def set_ClientID(self, value): """ Set the value of the ClientID input for this Choreo. ((required, string) The OAuth client ID provided by Google when you register your application.) """ super(GetContactsWithQueryInputSet, self)._set_input('ClientID', value) def set_ClientSecret(self, value): """ Set the value of the ClientSecret input for this Choreo. ((required, string) The OAuth client secret provided by Google when you registered your application.) """ super(GetContactsWithQueryInputSet, self)._set_input('ClientSecret', value) def set_Query(self, value): """ Set the value of the Query input for this Choreo. ((required, string) The contact criteria to search for, such as name or email address.) """ super(GetContactsWithQueryInputSet, self)._set_input('Query', value) def set_RefreshToken(self, value): """ Set the value of the RefreshToken input for this Choreo. ((required, string) The refresh token retrieved in the last step of the OAuth process. This is used when an access token is expired or not provided.) """ super(GetContactsWithQueryInputSet, self)._set_input('RefreshToken', value) class GetContactsWithQueryResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the GetContactsWithQuery Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Google.) 
""" return self._output.get('Response', None) def get_AccessToken(self): """ Retrieve the value for the "AccessToken" output from this Choreo execution. ((optional, string) The access token retrieved in the last step of the OAuth process. Access tokens that are expired will be refreshed and returned in the Choreo output.) """ return self._output.get('AccessToken', None) def get_ContactID(self): """ Retrieve the value for the "ContactID" output from this Choreo execution. ((string) The unique ID string for the retrieved contact. If more than one contact is retrieved by the request, only the first contact's ID is output.) """ return self._output.get('ContactID', None) def get_Link(self): """ Retrieve the value for the "Link" output from this Choreo execution. ((string) The unique edit link for the retrieved contact. If more than one contact is retrieved by the request, only the first contact's edit link is output.) """ return self._output.get('Link', None) class GetContactsWithQueryChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return GetContactsWithQueryResultSet(response, path)
apache-2.0
chrisfilo/NeuroVault
neurovault/apps/statmaps/migrations/0060_auto_20160103_0406.py
4
2687
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('statmaps', '0059_auto_20160101_0410'), ] operations = [ migrations.AlterField( model_name='collection', name='DOI', field=models.CharField(null=True, default=None, max_length=200, blank=True, unique=True, verbose_name=b'DOI of the corresponding paper (required if you want your maps to be archived in Stanford Digital Repository)'), preserve_default=True, ), migrations.AlterField( model_name='nidmresultstatisticmap', name='analysis_level', field=models.CharField(choices=[(b'S', b'single-subject'), (b'G', b'group'), (b'M', b'meta-analysis'), (b'Other', b'other')], max_length=200, blank=True, help_text=b'What level of summary data was used as the input to this analysis?', null=True, verbose_name=b'Analysis level'), preserve_default=True, ), migrations.AlterField( model_name='nidmresultstatisticmap', name='map_type', field=models.CharField(help_text=b'Type of statistic that is the basis of the inference', max_length=200, verbose_name=b'Map type', choices=[(b'T', b'T map'), (b'Z', b'Z map'), (b'F', b'F map'), (b'X2', b'Chi squared map'), (b'P', b'P map (given null hypothesis)'), (b'M', b'multivariate-beta map'), (b'U', b'univariate-beta map'), (b'R', b'ROI/mask'), (b'Pa', b'parcellation'), (b'A', b'anatomical'), (b'Other', b'other')]), preserve_default=True, ), migrations.AlterField( model_name='statisticmap', name='analysis_level', field=models.CharField(choices=[(b'S', b'single-subject'), (b'G', b'group'), (b'M', b'meta-analysis'), (b'Other', b'other')], max_length=200, blank=True, help_text=b'What level of summary data was used as the input to this analysis?', null=True, verbose_name=b'Analysis level'), preserve_default=True, ), migrations.AlterField( model_name='statisticmap', name='map_type', field=models.CharField(help_text=b'Type of statistic that is the basis of the inference', max_length=200, 
verbose_name=b'Map type', choices=[(b'T', b'T map'), (b'Z', b'Z map'), (b'F', b'F map'), (b'X2', b'Chi squared map'), (b'P', b'P map (given null hypothesis)'), (b'M', b'multivariate-beta map'), (b'U', b'univariate-beta map'), (b'R', b'ROI/mask'), (b'Pa', b'parcellation'), (b'A', b'anatomical'), (b'Other', b'other')]), preserve_default=True, ), ]
mit
UKTradeInvestment/export-wins-data
users/managers.py
1
1308
from django.contrib.auth.models import UserManager as BaseUserManager class UserManager(BaseUserManager): """ Much of this is just copied out of BaseUserManager, excluding the requirement for a username. """ def _create_user(self, email, password, **extra_fields): """ Creates and saves a User with the given username, email and password. """ email = self.normalize_email(email) user = self.model(email=email, **extra_fields) user.set_password(password) user.save(using=self._db) return user def create_user(self, email=None, password=None, **extra_fields): extra_fields.setdefault('is_staff', False) extra_fields.setdefault('is_superuser', False) return self._create_user(email, password, **extra_fields) def create_superuser(self, email, password, **extra_fields): extra_fields.setdefault('is_staff', True) extra_fields.setdefault('is_superuser', True) if extra_fields.get('is_staff') is not True: raise ValueError('Superuser must have is_staff=True.') if extra_fields.get('is_superuser') is not True: raise ValueError('Superuser must have is_superuser=True.') return self._create_user(email, password, **extra_fields)
gpl-3.0
PyQwt/PyQwt
junk/PyEdit.py
1
2691
#!/usr/bin/env python import sys from PQTokenize import * from keyword import * from qt import * class PyEdit(QTextEdit): def __init__(self, parent=None): QTextEdit.__init__(self, parent) # user interface setup self.setTextFormat(QTextEdit.PlainText) #self.setWrapPolicy(QTextEdit.Anywhere) self.setCaption('PyEdit -- a Python Editor for PyQt') font = QFont("Fixed", 12) font.setFixedPitch(1) self.setFont(font) # geometry height = 40*QFontMetrics(font).lineSpacing() request = QSize(600, height) if parent is not None: request = request.boundedTo(parent.size()) self.resize(request) self.y = 0 self.x = 0 def __insertWhite(self, y, x): if (y > self.y): # whitespace after a newline self.insertAt(' '*x, y, 0) else: # whitespace between tokens on the same line self.insertAt(' '*(x-self.x), y, self.x) def __insertToken(self, token, sy, sx, ey, ex): self.insertAt(token, sy, sx) self.y, self.x = ey, ex def fontify(self, type, token, (sy, sx), (ey, ex), line): """ Insert fontified text at the current cursor position. """ print "(%d,%d)->(%d,%d):\t%s\t%s" % \ (sy, sx, ey, ex, tok_name[type], repr(token)) self.__insertWhite(sy, sx) if type == NAME: if iskeyword(token): self.setBold(1) elif type == STRING: self.setColor(Qt.darkGreen) elif type == 52: self.setColor(Qt.red) self.__insertToken(token, sy, sx, ey, ex) self.setBold(0) if type == 1 and token in ['class', 'def']: self.setColor(Qt.blue) else: self.setColor(Qt.black) def focusNextPrevChild(self, next): """ Suppress tabbing to the next window in multi-line commands. """ if next and self.more: return 0 return QTextEdit.focusNextPrevChild(self, next) def mousePressEvent(self, e): """ Keep the cursor after the last prompt. """ if e.button() == Qt.LeftButton: self.moveCursor(QTextEdit.MoveEnd, 0) return def contentsContextMenuEvent(self,ev): """ Suppress the right button context menu. 
""" return if __name__ == '__main__': a = QApplication(sys.argv) w = PyEdit() a.setMainWidget(w) file = open('PyEdit.py') tokenize(file.readline, w.fontify) file.close() w.show() a.exec_loop()
gpl-2.0
whs/django
django/db/backends/base/creation.py
20
11972
import sys from io import StringIO from django.apps import apps from django.conf import settings from django.core import serializers from django.db import router # The prefix to put on the default database name when creating # the test database. TEST_DATABASE_PREFIX = 'test_' class BaseDatabaseCreation: """ Encapsulate backend-specific differences pertaining to creation and destruction of the test database. """ def __init__(self, connection): self.connection = connection @property def _nodb_connection(self): """ Used to be defined here, now moved to DatabaseWrapper. """ return self.connection._nodb_connection def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=False): """ Create a test database, prompting the user for confirmation if the database already exists. Return the name of the test database created. """ # Don't import django.core.management if it isn't needed. from django.core.management import call_command test_database_name = self._get_test_db_name() if verbosity >= 1: action = 'Creating' if keepdb: action = "Using existing" print("%s test database for alias %s..." % ( action, self._get_database_display_str(verbosity, test_database_name), )) # We could skip this call if keepdb is True, but we instead # give it the keepdb param. This is to handle the case # where the test DB doesn't exist, in which case we need to # create it, then just not destroy it. If we instead skip # this, we will get an exception. self._create_test_db(verbosity, autoclobber, keepdb) self.connection.close() settings.DATABASES[self.connection.alias]["NAME"] = test_database_name self.connection.settings_dict["NAME"] = test_database_name # We report migrate messages at one level lower than that requested. # This ensures we don't get flooded with messages during testing # (unless you really ask to be flooded). 
call_command( 'migrate', verbosity=max(verbosity - 1, 0), interactive=False, database=self.connection.alias, run_syncdb=True, ) # We then serialize the current state of the database into a string # and store it on the connection. This slightly horrific process is so people # who are testing on databases without transactions or who are using # a TransactionTestCase still get a clean database on every test run. if serialize: self.connection._test_serialized_contents = self.serialize_db_to_string() call_command('createcachetable', database=self.connection.alias) # Ensure a connection for the side effect of initializing the test database. self.connection.ensure_connection() return test_database_name def set_as_test_mirror(self, primary_settings_dict): """ Set this database up to be used in testing as a mirror of a primary database whose settings are given. """ self.connection.settings_dict['NAME'] = primary_settings_dict['NAME'] def serialize_db_to_string(self): """ Serialize all data in the database into a JSON string. Designed only for test runner usage; will not handle large amounts of data. 
""" # Build list of all apps to serialize from django.db.migrations.loader import MigrationLoader loader = MigrationLoader(self.connection) app_list = [] for app_config in apps.get_app_configs(): if ( app_config.models_module is not None and app_config.label in loader.migrated_apps and app_config.name not in settings.TEST_NON_SERIALIZED_APPS ): app_list.append((app_config, None)) # Make a function to iteratively return every object def get_objects(): for model in serializers.sort_dependencies(app_list): if (model._meta.can_migrate(self.connection) and router.allow_migrate_model(self.connection.alias, model)): queryset = model._default_manager.using(self.connection.alias).order_by(model._meta.pk.name) yield from queryset.iterator() # Serialize to a string out = StringIO() serializers.serialize("json", get_objects(), indent=None, stream=out) return out.getvalue() def deserialize_db_from_string(self, data): """ Reload the database with data from a string generated by the serialize_db_to_string() method. """ data = StringIO(data) for obj in serializers.deserialize("json", data, using=self.connection.alias): obj.save() def _get_database_display_str(self, verbosity, database_name): """ Return display string for a database for use in various actions. """ return "'%s'%s" % ( self.connection.alias, (" ('%s')" % database_name) if verbosity >= 2 else '', ) def _get_test_db_name(self): """ Internal implementation - return the name of the test DB that will be created. Only useful when called from create_test_db() and _create_test_db() and when no external munging is done with the 'NAME' settings. 
""" if self.connection.settings_dict['TEST']['NAME']: return self.connection.settings_dict['TEST']['NAME'] return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] def _execute_create_test_db(self, cursor, parameters, keepdb=False): cursor.execute('CREATE DATABASE %(dbname)s %(suffix)s' % parameters) def _create_test_db(self, verbosity, autoclobber, keepdb=False): """ Internal implementation - create the test db tables. """ test_database_name = self._get_test_db_name() test_db_params = { 'dbname': self.connection.ops.quote_name(test_database_name), 'suffix': self.sql_table_creation_suffix(), } # Create the test database and connect to it. with self._nodb_connection.cursor() as cursor: try: self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: # if we want to keep the db, then no need to do any of the below, # just return and skip it all. if keepdb: return test_database_name sys.stderr.write( "Got an error creating the test database: %s\n" % e) if not autoclobber: confirm = input( "Type 'yes' if you would like to try deleting the test " "database '%s', or 'no' to cancel: " % test_database_name) if autoclobber or confirm == 'yes': try: if verbosity >= 1: print("Destroying old test database for alias %s..." % ( self._get_database_display_str(verbosity, test_database_name), )) cursor.execute('DROP DATABASE %(dbname)s' % test_db_params) self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: sys.stderr.write( "Got an error recreating the test database: %s\n" % e) sys.exit(2) else: print("Tests cancelled.") sys.exit(1) return test_database_name def clone_test_db(self, number, verbosity=1, autoclobber=False, keepdb=False): """ Clone a test database. """ source_database_name = self.connection.settings_dict['NAME'] if verbosity >= 1: action = 'Cloning test database' if keepdb: action = 'Using existing clone' print("%s for alias %s..." 
% ( action, self._get_database_display_str(verbosity, source_database_name), )) # We could skip this call if keepdb is True, but we instead # give it the keepdb param. See create_test_db for details. self._clone_test_db(number, verbosity, keepdb) def get_test_db_clone_settings(self, number): """ Return a modified connection settings dict for the n-th clone of a DB. """ # When this function is called, the test database has been created # already and its name has been copied to settings_dict['NAME'] so # we don't need to call _get_test_db_name. orig_settings_dict = self.connection.settings_dict new_settings_dict = orig_settings_dict.copy() new_settings_dict['NAME'] = '{}_{}'.format(orig_settings_dict['NAME'], number) return new_settings_dict def _clone_test_db(self, number, verbosity, keepdb=False): """ Internal implementation - duplicate the test db tables. """ raise NotImplementedError( "The database backend doesn't support cloning databases. " "Disable the option to run tests in parallel processes.") def destroy_test_db(self, old_database_name=None, verbosity=1, keepdb=False, number=None): """ Destroy a test database, prompting the user for confirmation if the database already exists. """ self.connection.close() if number is None: test_database_name = self.connection.settings_dict['NAME'] else: test_database_name = self.get_test_db_clone_settings(number)['NAME'] if verbosity >= 1: action = 'Destroying' if keepdb: action = 'Preserving' print("%s test database for alias %s..." % ( action, self._get_database_display_str(verbosity, test_database_name), )) # if we want to preserve the database # skip the actual destroying piece. 
if not keepdb: self._destroy_test_db(test_database_name, verbosity) # Restore the original database name if old_database_name is not None: settings.DATABASES[self.connection.alias]["NAME"] = old_database_name self.connection.settings_dict["NAME"] = old_database_name def _destroy_test_db(self, test_database_name, verbosity): """ Internal implementation - remove the test db tables. """ # Remove the test database to clean up after # ourselves. Connect to the previous database (not the test database) # to do so, because it's not allowed to delete a database while being # connected to it. with self.connection._nodb_connection.cursor() as cursor: cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name)) def sql_table_creation_suffix(self): """ SQL to append to the end of the test table creation statements. """ return '' def test_db_signature(self): """ Return a tuple with elements of self.connection.settings_dict (a DATABASES setting value) that uniquely identify a database accordingly to the RDBMS particularities. """ settings_dict = self.connection.settings_dict return ( settings_dict['HOST'], settings_dict['PORT'], settings_dict['ENGINE'], self._get_test_db_name(), )
bsd-3-clause
gitcoinco/web
app/marketing/management/commands/debug_test.py
1
1299
''' Copyright (C) 2021 Gitcoin Core This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' from django.conf import settings from django.core.management.base import BaseCommand class Command(BaseCommand): help = 'stub for local testing' def handle(self, *args, **options): if not settings.DEBUG: print("cannot be run without settings.DEBUG") return from kudos.models import Token from avatar.utils import svg_to_png_inkscape token = Token.objects.get(pk=182) file_path = f"/code/app/assets/{token.image}" with open(file_path, 'rb') as f: print(svg_to_png_inkscape(f.read()))
agpl-3.0
mikkylok/mikky.lu
venv/lib/python2.7/site-packages/setuptools/sandbox.py
80
14549
import os import sys import tempfile import operator import functools import itertools import re import contextlib import pickle import textwrap from setuptools.extern import six from setuptools.extern.six.moves import builtins, map import pkg_resources.py31compat if sys.platform.startswith('java'): import org.python.modules.posix.PosixModule as _os else: _os = sys.modules[os.name] try: _file = file except NameError: _file = None _open = open from distutils.errors import DistutilsError from pkg_resources import working_set __all__ = [ "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup", ] def _execfile(filename, globals, locals=None): """ Python 3 implementation of execfile. """ mode = 'rb' with open(filename, mode) as stream: script = stream.read() # compile() function in Python 2.6 and 3.1 requires LF line endings. if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2): script = script.replace(b'\r\n', b'\n') script = script.replace(b'\r', b'\n') if locals is None: locals = globals code = compile(script, filename, 'exec') exec(code, globals, locals) @contextlib.contextmanager def save_argv(repl=None): saved = sys.argv[:] if repl is not None: sys.argv[:] = repl try: yield saved finally: sys.argv[:] = saved @contextlib.contextmanager def save_path(): saved = sys.path[:] try: yield saved finally: sys.path[:] = saved @contextlib.contextmanager def override_temp(replacement): """ Monkey-patch tempfile.tempdir with replacement, ensuring it exists """ pkg_resources.py31compat.makedirs(replacement, exist_ok=True) saved = tempfile.tempdir tempfile.tempdir = replacement try: yield finally: tempfile.tempdir = saved @contextlib.contextmanager def pushd(target): saved = os.getcwd() os.chdir(target) try: yield saved finally: os.chdir(saved) class UnpickleableException(Exception): """ An exception representing another Exception that could not be pickled. 
""" @staticmethod def dump(type, exc): """ Always return a dumped (pickled) type and exc. If exc can't be pickled, wrap it in UnpickleableException first. """ try: return pickle.dumps(type), pickle.dumps(exc) except Exception: # get UnpickleableException inside the sandbox from setuptools.sandbox import UnpickleableException as cls return cls.dump(cls, cls(repr(exc))) class ExceptionSaver: """ A Context Manager that will save an exception, serialized, and restore it later. """ def __enter__(self): return self def __exit__(self, type, exc, tb): if not exc: return # dump the exception self._saved = UnpickleableException.dump(type, exc) self._tb = tb # suppress the exception return True def resume(self): "restore and re-raise any exception" if '_saved' not in vars(self): return type, exc = map(pickle.loads, self._saved) six.reraise(type, exc, self._tb) @contextlib.contextmanager def save_modules(): """ Context in which imported modules are saved. Translates exceptions internal to the context into the equivalent exception outside the context. """ saved = sys.modules.copy() with ExceptionSaver() as saved_exc: yield saved sys.modules.update(saved) # remove any modules imported since del_modules = ( mod_name for mod_name in sys.modules if mod_name not in saved # exclude any encodings modules. 
See #285 and not mod_name.startswith('encodings.') ) _clear_modules(del_modules) saved_exc.resume() def _clear_modules(module_names): for mod_name in list(module_names): del sys.modules[mod_name] @contextlib.contextmanager def save_pkg_resources_state(): saved = pkg_resources.__getstate__() try: yield saved finally: pkg_resources.__setstate__(saved) @contextlib.contextmanager def setup_context(setup_dir): temp_dir = os.path.join(setup_dir, 'temp') with save_pkg_resources_state(): with save_modules(): hide_setuptools() with save_path(): with save_argv(): with override_temp(temp_dir): with pushd(setup_dir): # ensure setuptools commands are available __import__('setuptools') yield def _needs_hiding(mod_name): """ >>> _needs_hiding('setuptools') True >>> _needs_hiding('pkg_resources') True >>> _needs_hiding('setuptools_plugin') False >>> _needs_hiding('setuptools.__init__') True >>> _needs_hiding('distutils') True >>> _needs_hiding('os') False >>> _needs_hiding('Cython') True """ pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)') return bool(pattern.match(mod_name)) def hide_setuptools(): """ Remove references to setuptools' modules from sys.modules to allow the invocation to import the most appropriate setuptools. This technique is necessary to avoid issues such as #315 where setuptools upgrading itself would fail to find a function declared in the metadata. 
""" modules = filter(_needs_hiding, sys.modules) _clear_modules(modules) def run_setup(setup_script, args): """Run a distutils setup script, sandboxed in its directory""" setup_dir = os.path.abspath(os.path.dirname(setup_script)) with setup_context(setup_dir): try: sys.argv[:] = [setup_script] + list(args) sys.path.insert(0, setup_dir) # reset to include setup dir, w/clean callback list working_set.__init__() working_set.callbacks.append(lambda dist: dist.activate()) # __file__ should be a byte string on Python 2 (#712) dunder_file = ( setup_script if isinstance(setup_script, str) else setup_script.encode(sys.getfilesystemencoding()) ) with DirectorySandbox(setup_dir): ns = dict(__file__=dunder_file, __name__='__main__') _execfile(setup_script, ns) except SystemExit as v: if v.args and v.args[0]: raise # Normal exit, just return class AbstractSandbox: """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts""" _active = False def __init__(self): self._attrs = [ name for name in dir(_os) if not name.startswith('_') and hasattr(self, name) ] def _copy(self, source): for name in self._attrs: setattr(os, name, getattr(source, name)) def __enter__(self): self._copy(self) if _file: builtins.file = self._file builtins.open = self._open self._active = True def __exit__(self, exc_type, exc_value, traceback): self._active = False if _file: builtins.file = _file builtins.open = _open self._copy(_os) def run(self, func): """Run 'func' under os sandboxing""" with self: return func() def _mk_dual_path_wrapper(name): original = getattr(_os, name) def wrap(self, src, dst, *args, **kw): if self._active: src, dst = self._remap_pair(name, src, dst, *args, **kw) return original(src, dst, *args, **kw) return wrap for name in ["rename", "link", "symlink"]: if hasattr(_os, name): locals()[name] = _mk_dual_path_wrapper(name) def _mk_single_path_wrapper(name, original=None): original = original or getattr(_os, name) def wrap(self, path, *args, **kw): if self._active: path = 
self._remap_input(name, path, *args, **kw) return original(path, *args, **kw) return wrap if _file: _file = _mk_single_path_wrapper('file', _file) _open = _mk_single_path_wrapper('open', _open) for name in [ "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat", "startfile", "mkfifo", "mknod", "pathconf", "access" ]: if hasattr(_os, name): locals()[name] = _mk_single_path_wrapper(name) def _mk_single_with_return(name): original = getattr(_os, name) def wrap(self, path, *args, **kw): if self._active: path = self._remap_input(name, path, *args, **kw) return self._remap_output(name, original(path, *args, **kw)) return original(path, *args, **kw) return wrap for name in ['readlink', 'tempnam']: if hasattr(_os, name): locals()[name] = _mk_single_with_return(name) def _mk_query(name): original = getattr(_os, name) def wrap(self, *args, **kw): retval = original(*args, **kw) if self._active: return self._remap_output(name, retval) return retval return wrap for name in ['getcwd', 'tmpnam']: if hasattr(_os, name): locals()[name] = _mk_query(name) def _validate_path(self, path): """Called to remap or validate any path, whether input or output""" return path def _remap_input(self, operation, path, *args, **kw): """Called for path inputs""" return self._validate_path(path) def _remap_output(self, operation, path): """Called for path outputs""" return self._validate_path(path) def _remap_pair(self, operation, src, dst, *args, **kw): """Called for path pairs like rename, link, and symlink operations""" return ( self._remap_input(operation + '-from', src, *args, **kw), self._remap_input(operation + '-to', dst, *args, **kw) ) if hasattr(os, 'devnull'): _EXCEPTIONS = [os.devnull,] else: _EXCEPTIONS = [] class DirectorySandbox(AbstractSandbox): """Restrict operations to a single subdirectory - pseudo-chroot""" write_ops = dict.fromkeys([ "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir", "utime", 
"lchown", "chroot", "mkfifo", "mknod", "tempnam", ]) _exception_patterns = [ # Allow lib2to3 to attempt to save a pickled grammar object (#121) r'.*lib2to3.*\.pickle$', ] "exempt writing to paths that match the pattern" def __init__(self, sandbox, exceptions=_EXCEPTIONS): self._sandbox = os.path.normcase(os.path.realpath(sandbox)) self._prefix = os.path.join(self._sandbox, '') self._exceptions = [ os.path.normcase(os.path.realpath(path)) for path in exceptions ] AbstractSandbox.__init__(self) def _violation(self, operation, *args, **kw): from setuptools.sandbox import SandboxViolation raise SandboxViolation(operation, args, kw) if _file: def _file(self, path, mode='r', *args, **kw): if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): self._violation("file", path, mode, *args, **kw) return _file(path, mode, *args, **kw) def _open(self, path, mode='r', *args, **kw): if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): self._violation("open", path, mode, *args, **kw) return _open(path, mode, *args, **kw) def tmpnam(self): self._violation("tmpnam") def _ok(self, path): active = self._active try: self._active = False realpath = os.path.normcase(os.path.realpath(path)) return ( self._exempted(realpath) or realpath == self._sandbox or realpath.startswith(self._prefix) ) finally: self._active = active def _exempted(self, filepath): start_matches = ( filepath.startswith(exception) for exception in self._exceptions ) pattern_matches = ( re.match(pattern, filepath) for pattern in self._exception_patterns ) candidates = itertools.chain(start_matches, pattern_matches) return any(candidates) def _remap_input(self, operation, path, *args, **kw): """Called for path inputs""" if operation in self.write_ops and not self._ok(path): self._violation(operation, os.path.realpath(path), *args, **kw) return path def _remap_pair(self, operation, src, dst, *args, **kw): """Called for path pairs like rename, link, and symlink operations""" if not self._ok(src) 
or not self._ok(dst): self._violation(operation, src, dst, *args, **kw) return (src, dst) def open(self, file, flags, mode=0o777, *args, **kw): """Called for low-level os.open()""" if flags & WRITE_FLAGS and not self._ok(file): self._violation("os.open", file, flags, mode, *args, **kw) return _os.open(file, flags, mode, *args, **kw) WRITE_FLAGS = functools.reduce( operator.or_, [getattr(_os, a, 0) for a in "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()] ) class SandboxViolation(DistutilsError): """A setup script attempted to modify the filesystem outside the sandbox""" tmpl = textwrap.dedent(""" SandboxViolation: {cmd}{args!r} {kwargs} The package setup script has attempted to modify files on your system that are not within the EasyInstall build area, and has been aborted. This package cannot be safely installed by EasyInstall, and may not support alternate installation locations even if you run its setup script by hand. Please inform the package's author and the EasyInstall maintainers to find out if a fix or workaround is available. """).lstrip() def __str__(self): cmd, args, kwargs = self.args return self.tmpl.format(**locals())
mit
playm2mboy/edx-platform
lms/djangoapps/courseware/migrations/0010_rename_xblock_field_content_to_user_state_summary.py
114
11590
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Removing unique constraint on 'XModuleSettingsField', fields ['usage_id', 'field_name'] db.delete_unique('courseware_xmodulesettingsfield', ['usage_id', 'field_name']) # Deleting model 'XModuleSettingsField' db.delete_table('courseware_xmodulesettingsfield') # Move all content currently stored as Scope.content to Scope.user_state_summary db.rename_table('courseware_xmodulecontentfield', 'courseware_xmoduleuserstatesummaryfield') db.rename_column('courseware_xmoduleuserstatesummaryfield', 'definition_id', 'usage_id') def backwards(self, orm): # Adding model 'XModuleSettingsField' db.create_table('courseware_xmodulesettingsfield', ( ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True, db_index=True)), ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True, db_index=True)), ('value', self.gf('django.db.models.fields.TextField')(default='null')), ('field_name', self.gf('django.db.models.fields.CharField')(max_length=64, db_index=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('usage_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), )) db.send_create_signal('courseware', ['XModuleSettingsField']) # Adding unique constraint on 'XModuleSettingsField', fields ['usage_id', 'field_name'] db.create_unique('courseware_xmodulesettingsfield', ['usage_id', 'field_name']) db.rename_table('courseware_xmoduleuserstatesummaryfield', 'courseware_xmodulecontentfield') db.rename_column('courseware_xmodulecontentfield', 'usage_id', 'definition_id') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': 
'80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 
'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'courseware.offlinecomputedgrade': { 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'OfflineComputedGrade'}, 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'gradeset': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'courseware.offlinecomputedgradelog': { 'Meta': {'ordering': "['-created']", 'object_name': 'OfflineComputedGradeLog'}, 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nstudents': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'seconds': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'courseware.studentmodule': { 'Meta': {'unique_together': "(('student', 'module_state_key', 'course_id'),)", 'object_name': 'StudentModule'}, 'course_id': 
('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}), 'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}), 'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}), 'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'courseware.studentmodulehistory': { 'Meta': {'object_name': 'StudentModuleHistory'}, 'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'student_module': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courseware.StudentModule']"}), 'version': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}) }, 'courseware.xmodulestudentinfofield': { 'Meta': {'unique_together': "(('student', 'field_name'),)", 
'object_name': 'XModuleStudentInfoField'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'value': ('django.db.models.fields.TextField', [], {'default': "'null'"}) }, 'courseware.xmodulestudentprefsfield': { 'Meta': {'unique_together': "(('student', 'module_type', 'field_name'),)", 'object_name': 'XModuleStudentPrefsField'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'module_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}), 'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'value': ('django.db.models.fields.TextField', [], {'default': "'null'"}) }, 'courseware.xmoduleuserstatesummaryfield': { 'Meta': {'unique_together': "(('usage_id', 'field_name'),)", 'object_name': 'XModuleUserStateSummaryField'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'usage_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.TextField', [], {'default': "'null'"}) } } complete_apps = ['courseware']
agpl-3.0
fhamborg/news-please
newsplease/crawler/simple_crawler.py
1
3455
import socket import copy import threading import logging import requests import urllib3 from .response_decoder import decode_response MAX_FILE_SIZE = 20000000 MIN_FILE_SIZE = 10 LOGGER = logging.getLogger(__name__) # customize headers HEADERS = { 'Connection': 'close', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36', } urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) class SimpleCrawler(object): _results = {} @staticmethod def fetch_url(url, timeout=None): """ Crawls the html content of the parameter url and returns the html :param url: :param timeout: in seconds, if None, the urllib default is used :return: """ return SimpleCrawler._fetch_url(url, False, timeout=timeout) @staticmethod def _fetch_url(url, is_threaded, timeout=None): """ Crawls the html content of the parameter url and saves the html in _results :param url: :param is_threaded: If True, results will be stored for later processing by the fetch_urls method. Else not. 
:param timeout: in seconds, if None, the urllib default is used :return: html of the url """ html_str = None # send try: # read by streaming chunks (stream=True, iter_content=xx) # so we can stop downloading as soon as MAX_FILE_SIZE is reached response = requests.get(url, timeout=timeout, verify=False, allow_redirects=True, headers=HEADERS) except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL): LOGGER.error('malformed URL: %s', url) except requests.exceptions.TooManyRedirects: LOGGER.error('too many redirects: %s', url) except requests.exceptions.SSLError as err: LOGGER.error('SSL: %s %s', url, err) except ( socket.timeout, requests.exceptions.ConnectionError, requests.exceptions.Timeout, socket.error, socket.gaierror ) as err: LOGGER.error('connection/timeout error: %s %s', url, err) else: # safety checks if response.status_code != 200: LOGGER.error('not a 200 response: %s', response.status_code) elif response.text is None or len(response.text) < MIN_FILE_SIZE: LOGGER.error('too small/incorrect: %s %s', url, len(response.text)) elif len(response.text) > MAX_FILE_SIZE: LOGGER.error('too large: %s %s', url, len(response.text)) else: html_str = decode_response(response) if is_threaded: SimpleCrawler._results[url] = html_str return html_str @staticmethod def fetch_urls(urls, timeout=None): """ Crawls the html content of all given urls in parallel. Returns when all requests are processed. :param urls: :param timeout: in seconds, if None, the urllib default is used :return: """ threads = [threading.Thread(target=SimpleCrawler._fetch_url, args=(url, True, timeout)) for url in urls] for thread in threads: thread.start() for thread in threads: thread.join() results = copy.deepcopy(SimpleCrawler._results) SimpleCrawler._results = {} return results
apache-2.0
alexkogon/ansible
test/units/plugins/connections/test_connection.py
86
3988
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from six import StringIO from ansible.compat.tests import unittest from ansible.playbook.play_context import PlayContext from ansible.plugins.connections import ConnectionBase #from ansible.plugins.connections.accelerate import Connection as AccelerateConnection #from ansible.plugins.connections.chroot import Connection as ChrootConnection #from ansible.plugins.connections.funcd import Connection as FuncdConnection #from ansible.plugins.connections.jail import Connection as JailConnection #from ansible.plugins.connections.libvirt_lxc import Connection as LibvirtLXCConnection from ansible.plugins.connections.local import Connection as LocalConnection from ansible.plugins.connections.paramiko_ssh import Connection as ParamikoConnection from ansible.plugins.connections.ssh import Connection as SSHConnection #from ansible.plugins.connections.winrm import Connection as WinRmConnection class TestConnectionBaseClass(unittest.TestCase): def setUp(self): self.play_context = PlayContext() self.in_stream = StringIO() def tearDown(self): pass def test_subclass_error(self): class ConnectionModule1(ConnectionBase): pass with self.assertRaises(TypeError): 
ConnectionModule1() class ConnectionModule2(ConnectionBase): def get(self, key): super(ConnectionModule2, self).get(key) with self.assertRaises(TypeError): ConnectionModule2() def test_subclass_success(self): class ConnectionModule3(ConnectionBase): @property def transport(self): pass def _connect(self): pass def exec_command(self): pass def put_file(self): pass def fetch_file(self): pass def close(self): pass self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3) # def test_accelerate_connection_module(self): # self.assertIsInstance(AccelerateConnection(), AccelerateConnection) # # def test_chroot_connection_module(self): # self.assertIsInstance(ChrootConnection(), ChrootConnection) # # def test_funcd_connection_module(self): # self.assertIsInstance(FuncdConnection(), FuncdConnection) # # def test_jail_connection_module(self): # self.assertIsInstance(JailConnection(), JailConnection) # # def test_libvirt_lxc_connection_module(self): # self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection) def test_local_connection_module(self): self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection) def test_paramiko_connection_module(self): self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection) def test_ssh_connection_module(self): self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection) # def test_winrm_connection_module(self): # self.assertIsInstance(WinRmConnection(), WinRmConnection)
gpl-3.0
collmot/ardupilot
libraries/AP_HAL_ChibiOS/hwdef/scripts/STM32H743xx.py
11
34833
#!/usr/bin/env python ''' these tables are generated from the STM32 datasheets for the STM32H743bi ''' # additional build information for ChibiOS build = { "CHIBIOS_STARTUP_MK" : "os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32h7xx.mk", "CHIBIOS_PLATFORM_MK" : "os/hal/ports/STM32/STM32H7xx/platform.mk" } # MCU parameters mcu = { # DMA peripheral capabilities: # - can't use ITCM or DTCM for any DMA # - SPI1 to SPI5 can use AXI SRAM, SRAM1 to SRAM3 and SRAM4 for DMA # - SPI6, I2C4 and ADC3 can use SRAM4 on BDMA (I didn't actually test ADC3) # - UARTS can use AXI SRAM, SRAM1 to SRAM3 and SRAM4 for DMA # - I2C1, I2C2 and I2C3 can use AXI SRAM, SRAM1 to SRAM3 and SRAM4 with DMA # - timers can use AXI SRAM, SRAM1 to SRAM3 and SRAM4 with DMA # - ADC12 can use AXI SRAM, SRAM1 to SRAM3 and SRAM4 # - SDMMC can use AXI SRAM, SRAM1 to SRAM3 with IDMA (cannot use SRAM4) # ram map, as list of (address, size-kb, flags) # flags of 1 means DMA-capable (DMA and BDMA) # flags of 2 means faster memory for CPU intensive work # flags of 4 means memory can be used for SDMMC DMA 'RAM_MAP' : [ (0x20000000, 128, 2), # DTCM, tightly coupled, no DMA, fast (0x30000000, 256, 0), # SRAM1, SRAM2 (0x24000000, 512, 4), # AXI SRAM. Use this for SDMMC IDMA ops (0x00000400, 63, 2), # ITCM (first 1k removed, to keep address 0 unused) (0x30040000, 32, 0), # SRAM3. (0x38000000, 64, 1), # SRAM4. 
], 'EXPECTED_CLOCK' : 400000000, # this MCU has M7 instructions and hardware double precision 'CORTEX' : 'cortex-m7', 'CPU_FLAGS' : '-mcpu=cortex-m7 -mfpu=fpv5-d16 -mfloat-abi=hard', } pincount = { 'A': 16, 'B': 16, 'C': 16, 'D': 16, 'E': 16, 'F': 16, 'G': 16, 'H': 16, 'I': 16, 'J': 16, 'K': 16 } # no DMA map as we will dynamically allocate DMA channels using the DMAMUX DMA_Map = None AltFunction_map = { # format is PIN:FUNCTION : AFNUM # extracted from tabula-AF-H743.csv "PA0:ETH_MII_CRS" : 11, "PA0:EVENT-OUT" : 15, "PA0:SAI2_SD_B" : 10, "PA0:SDMMC2_CMD" : 9, "PA0:TIM15_BKIN" : 4, "PA0:TIM2_CH1" : 1, "PA0:TIM2_ETR" : 1, "PA0:TIM5_CH1" : 2, "PA0:TIM8_ETR" : 3, "PA0:UART4_TX" : 8, "PA0:USART2_CTS_NSS" : 7, "PA1:ETH_MII_RX_CLK" : 11, "PA1:ETH_RMII_REF_CLK" : 11, "PA1:EVENT-OUT" : 15, "PA1:LCD_R2" : 14, "PA1:LPTIM3_OUT" : 3, "PA1:QUADSPI_BK1_IO3" : 9, "PA1:SAI2_MCK_B" : 10, "PA1:TIM15_CH1N" : 4, "PA1:TIM2_CH2" : 1, "PA1:TIM5_CH2" : 2, "PA1:UART4_RX" : 8, "PA1:USART2_RTS" : 7, "PA2:ETH_MDIO" : 11, "PA2:EVENT-OUT" : 15, "PA2:LCD_R1" : 14, "PA2:LPTIM4_OUT" : 3, "PA2:MDIOS_MDIO" : 12, "PA2:SAI2_SCK_B" : 8, "PA2:TIM15_CH1" : 4, "PA2:TIM2_CH3" : 1, "PA2:TIM5_CH3" : 2, "PA2:USART2_TX" : 7, "PA3:ETH_MII_COL" : 11, "PA3:EVENT-OUT" : 15, "PA3:LCD_B2" : 9, "PA3:LCD_B5" : 14, "PA3:LPTIM5_OUT" : 3, "PA3:OTG_HS_ULPI_D0" : 10, "PA3:TIM15_CH2" : 4, "PA3:TIM2_CH4" : 1, "PA3:TIM5_CH4" : 2, "PA3:USART2_RX" : 7, "PA4:DCMI_HSYNC" : 13, "PA4:EVENT-OUT" : 15, "PA4:I2S1_WS" : 5, "PA4:I2S3_WS" : 6, "PA4:LCD_VSYNC" : 14, "PA4:OTG_HS_SOF" : 12, "PA4:SPI1_NSS" : 5, "PA4:SPI3_NSS" : 6, "PA4:SPI6_NSS" : 8, "PA4:TIM5_ETR" : 2, "PA4:USART2_CK" : 7, "PA5:EVENT-OUT" : 15, "PA5:I2S1_CK" : 5, "PA5:LCD_R4" : 14, "PA5:OTG_HS_ULPI_CK" : 10, "PA5:SPI1_SCK" : 5, "PA5:SPI6_SCK" : 8, "PA5:TIM2_CH1" : 1, "PA5:TIM2_ETR" : 1, "PA5:TIM8_CH1N" : 3, "PA6:DCMI_PIXCLK" : 13, "PA6:EVENT-OUT" : 15, "PA6:I2S1_SDI" : 5, "PA6:LCD_G2" : 14, "PA6:MDIOS_MDC" : 11, "PA6:SPI1_MISO" : 5, "PA6:SPI6_MISO" : 8, "PA6:TIM13_CH1" : 9, 
"PA6:TIM1_BKIN" : 1, "PA6:TIM1_BKIN_COMP12" : 12, "PA6:TIM3_CH1" : 2, "PA6:TIM8_BKIN" : 3, "PA6:TIM8_BKIN_COMP12" : 10, "PA7:ETH_MII_RX_DV" : 11, "PA7:ETH_RMII_CRS_DV" : 11, "PA7:EVENT-OUT" : 15, "PA7:FMC_SDNWE" : 12, "PA7:I2S1_SDO" : 5, "PA7:SPI1_MOSI" : 5, "PA7:SPI6_MOSI" : 8, "PA7:TIM14_CH1" : 9, "PA7:TIM1_CH1N" : 1, "PA7:TIM3_CH2" : 2, "PA7:TIM8_CH1N" : 3, "PA8:EVENT-OUT" : 15, "PA8:HRTIM_CHB2" : 2, "PA8:I2C3_SCL" : 4, "PA8:LCD_B3" : 13, "PA8:LCD_R6" : 14, "PA8:MCO1" : 0, "PA8:OTG_FS_SOF" : 10, "PA8:TIM1_CH1" : 1, "PA8:TIM8_BKIN2" : 3, "PA8:TIM8_BKIN2_COMP12" : 12, "PA8:UART7_RX" : 11, "PA8:USART1_CK" : 7, "PA9:DCMI_D0" : 13, "PA9:EVENT-OUT" : 15, "PA9:CAN1_RX" : 9, "PA9:HRTIM_CHC1" : 2, "PA9:I2C3_SMBA" : 4, "PA9:I2S2_CK" : 5, "PA9:LCD_R5" : 14, "PA9:LPUART1_TX" : 3, "PA9:SPI2_SCK" : 5, "PA9:TIM1_CH2" : 1, "PA9:USART1_TX" : 7, "PA10:DCMI_D1" : 13, "PA10:EVENT-OUT" : 15, "PA10:CAN1_TX" : 9, "PA10:HRTIM_CHC2" : 2, "PA10:LCD_B1" : 14, "PA10:LCD_B4" : 12, "PA10:LPUART1_RX" : 3, "PA10:MDIOS_MDIO" : 11, "PA10:OTG_FS_ID" : 10, "PA10:TIM1_CH3" : 1, "PA10:USART1_RX" : 7, "PA11:EVENT-OUT" : 15, "PA11:CAN1_RX" : 9, "PA11:HRTIM_CHD1" : 2, "PA11:I2S2_WS" : 5, "PA11:LCD_R4" : 14, "PA11:LPUART1_CTS" : 3, "PA11:OTG_FS_DM" : 10, "PA11:SPI2_NSS" : 5, "PA11:TIM1_CH4" : 1, "PA11:UART4_RX" : 6, "PA11:USART1_CTS_NSS" : 7, "PA12:EVENT-OUT" : 15, "PA12:CAN1_TX" : 9, "PA12:HRTIM_CHD2" : 2, "PA12:I2S2_CK" : 5, "PA12:LCD_R5" : 14, "PA12:LPUART1_RTS" : 3, "PA12:OTG_FS_DP" : 10, "PA12:SAI2_FS_B" : 8, "PA12:SPI2_SCK" : 5, "PA12:TIM1_ETR" : 1, "PA12:UART4_TX" : 6, "PA12:USART1_RTS" : 7, "PA13:EVENT-OUT" : 15, "PA13:JTMS-SWDIO" : 0, "PA14:EVENT-OUT" : 15, "PA14:JTCK-SWCLK" : 0, "PA15:EVENT-OUT" : 15, "PA15:HDMI_CEC" : 4, "PA15:HRTIM_FLT1" : 2, "PA15:I2S1_WS" : 5, "PA15:I2S3_WS" : 6, "PA15:JTDI" : 0, "PA15:SPI1_NSS" : 5, "PA15:SPI3_NSS" : 6, "PA15:SPI6_NSS" : 7, "PA15:TIM2_CH1" : 1, "PA15:TIM2_ETR" : 1, "PA15:UART4_RTS" : 8, "PA15:UART7_TX" : 11, "PB0:DFSDM_CKOUT" : 6, "PB0:ETH_MII_RXD2" : 11, 
"PB0:EVENT-OUT" : 15, "PB0:LCD_G1" : 14, "PB0:LCD_R3" : 9, "PB0:OTG_HS_ULPI_D1" : 10, "PB0:TIM1_CH2N" : 1, "PB0:TIM3_CH3" : 2, "PB0:TIM8_CH2N" : 3, "PB0:UART4_CTS" : 8, "PB1:DFSDM_DATIN1" : 6, "PB1:ETH_MII_RXD3" : 11, "PB1:EVENT-OUT" : 15, "PB1:LCD_G0" : 14, "PB1:LCD_R6" : 9, "PB1:OTG_HS_ULPI_D2" : 10, "PB1:TIM1_CH3N" : 1, "PB1:TIM3_CH4" : 2, "PB1:TIM8_CH3N" : 3, "PB2:DFSDM_CKIN1" : 4, "PB2:EVENT-OUT" : 15, "PB2:I2S3_SDO" : 7, "PB2:QUADSPI_CLK" : 9, "PB2:SAI1_D1" : 2, "PB2:SAI1_SD_A" : 6, "PB2:SAI4_D1" : 10, "PB2:SAI4_SD_A" : 8, "PB2:SPI3_MOSI" : 7, "PB3:EVENT-OUT" : 15, "PB3:HRTIM_FLT4" : 2, "PB3:I2S1_CK" : 5, "PB3:I2S3_CK" : 6, "PB3:JTDO" : 0, "PB3:SDMMC2_D2" : 9, "PB3:SPI1_SCK" : 5, "PB3:SPI3_SCK" : 6, "PB3:SPI6_SCK" : 8, "PB3:TIM2_CH2" : 1, "PB3:TRACESWO" : 0, "PB3:UART7_RX" : 11, "PB4:EVENT-OUT" : 15, "PB4:HRTIM_EEV6" : 3, "PB4:I2S1_SDI" : 5, "PB4:I2S2_WS" : 7, "PB4:I2S3_SDI" : 6, "PB4:NJTRST" : 0, "PB4:SDMMC2_D3" : 9, "PB4:SPI1_MISO" : 5, "PB4:SPI2_NSS" : 7, "PB4:SPI3_MISO" : 6, "PB4:SPI6_MISO" : 8, "PB4:TIM16_BKIN" : 1, "PB4:TIM3_CH1" : 2, "PB4:UART7_TX" : 11, "PB5:DCMI_D10" : 13, "PB5:ETH_PPS_OUT" : 11, "PB5:EVENT-OUT" : 15, "PB5:CAN2_RX" : 9, "PB5:FMC_SDCKE1" : 12, "PB5:HRTIM_EEV7" : 3, "PB5:I2C1_SMBA" : 4, "PB5:I2C4_SMBA" : 6, "PB5:I2S1_SDO" : 5, "PB5:I2S3_SDO" : 7, "PB5:OTG_HS_ULPI_D7" : 10, "PB5:SPI1_MOSI" : 5, "PB5:SPI3_MOSI" : 7, "PB5:SPI6_MOSI" : 8, "PB5:TIM17_BKIN" : 1, "PB5:TIM3_CH2" : 2, "PB5:UART5_RX" : 14, "PB6:DCMI_D5" : 13, "PB6:DFSDM_DATIN5" : 11, "PB6:EVENT-OUT" : 15, "PB6:CAN2_TX" : 9, "PB6:FMC_SDNE1" : 12, "PB6:HDMI_CEC" : 5, "PB6:HRTIM_EEV8" : 3, "PB6:I2C1_SCL" : 4, "PB6:I2C4_SCL" : 6, "PB6:LPUART1_TX" : 8, "PB6:QUADSPI_BK1_NCS" : 10, "PB6:TIM16_CH1N" : 1, "PB6:TIM4_CH1" : 2, "PB6:UART5_TX" : 14, "PB6:USART1_TX" : 7, "PB7:DCMI_VSYNC" : 13, "PB7:DFSDM_CKIN5" : 11, "PB7:EVENT-OUT" : 15, "PB7:CAN2_TX" : 9, "PB7:FMC_NL" : 12, "PB7:HRTIM_EEV9" : 3, "PB7:I2C1_SDA" : 4, "PB7:I2C4_SDA" : 6, "PB7:LPUART1_RX" : 8, "PB7:TIM17_CH1N" : 1, 
"PB7:TIM4_CH2" : 2, "PB7:USART1_RX" : 7, "PB8:DCMI_D6" : 13, "PB8:DFSDM_CKIN7" : 3, "PB8:ETH_MII_TXD3" : 11, "PB8:EVENT-OUT" : 15, "PB8:CAN1_RX" : 9, "PB8:I2C1_SCL" : 4, "PB8:I2C4_SCL" : 6, "PB8:LCD_B6" : 14, "PB8:SDMMC1_CKIN" : 7, "PB8:SDMMC1_D4" : 12, "PB8:SDMMC2_D4" : 10, "PB8:TIM16_CH1" : 1, "PB8:TIM4_CH3" : 2, "PB8:UART4_RX" : 8, "PB9:DCMI_D7" : 13, "PB9:DFSDM_DATIN7" : 3, "PB9:EVENT-OUT" : 15, "PB9:CAN1_TX" : 9, "PB9:I2C1_SDA" : 4, "PB9:I2C4_SDA" : 6, "PB9:I2C4_SMBA" : 11, "PB9:I2S2_WS" : 5, "PB9:LCD_B7" : 14, "PB9:SDMMC1_CDIR" : 7, "PB9:SDMMC1_D5" : 12, "PB9:SDMMC2_D5" : 10, "PB9:SPI2_NSS" : 5, "PB9:TIM17_CH1" : 1, "PB9:TIM4_CH4" : 2, "PB9:UART4_TX" : 8, "PB10:DFSDM_DATIN7" : 6, "PB10:ETH_MII_RX_ER" : 11, "PB10:EVENT-OUT" : 15, "PB10:HRTIM_SCOUT" : 2, "PB10:I2C2_SCL" : 4, "PB10:I2S2_CK" : 5, "PB10:LCD_G4" : 14, "PB10:LPTIM2_IN1" : 3, "PB10:OTG_HS_ULPI_D3" : 10, "PB10:QUADSPI_BK1_NCS" : 9, "PB10:SPI2_SCK" : 5, "PB10:TIM2_CH3" : 1, "PB10:USART3_TX" : 7, "PB11:DFSDM_CKIN7" : 6, "PB11:ETH_MII_TX_EN" : 11, "PB11:ETH_RMII_TX_EN" : 11, "PB11:EVENT-OUT" : 15, "PB11:HRTIM_SCIN" : 2, "PB11:I2C2_SDA" : 4, "PB11:LCD_G5" : 14, "PB11:LPTIM2_ETR" : 3, "PB11:OTG_HS_ULPI_D4" : 10, "PB11:TIM2_CH4" : 1, "PB11:USART3_RX" : 7, "PB12:DFSDM_DATIN1" : 6, "PB12:ETH_MII_TXD0" : 11, "PB12:ETH_RMII_TXD0" : 11, "PB12:EVENT-OUT" : 15, "PB12:CAN2_RX" : 9, "PB12:I2C2_SMBA" : 4, "PB12:I2S2_WS" : 5, "PB12:OTG_HS_ID" : 12, "PB12:OTG_HS_ULPI_D5" : 10, "PB12:SPI2_NSS" : 5, "PB12:TIM1_BKIN" : 1, "PB12:TIM1_BKIN_COMP12" : 13, "PB12:UART5_RX" : 14, "PB12:USART3_CK" : 7, "PB13:DFSDM_CKIN1" : 6, "PB13:ETH_MII_TXD1" : 11, "PB13:ETH_RMII_TXD1" : 11, "PB13:EVENT-OUT" : 15, "PB13:CAN2_TX" : 9, "PB13:I2S2_CK" : 5, "PB13:LPTIM2_OUT" : 3, "PB13:OTG_HS_ULPI_D6" : 10, "PB13:SPI2_SCK" : 5, "PB13:TIM1_CH1N" : 1, "PB13:UART5_TX" : 14, "PB13:USART3_CTS_NSS" : 7, "PB14:DFSDM_DATIN2" : 6, "PB14:EVENT-OUT" : 15, "PB14:I2S2_SDI" : 5, "PB14:OTG_HS_DM" : 12, "PB14:SDMMC2_D0" : 9, "PB14:SPI2_MISO" : 5, "PB14:TIM12_CH1" 
: 2, "PB14:TIM1_CH2N" : 1, "PB14:TIM8_CH2N" : 3, "PB14:UART4_RTS" : 8, "PB14:USART1_TX" : 4, "PB14:USART3_RTS" : 7, "PB15:DFSDM_CKIN2" : 6, "PB15:EVENT-OUT" : 15, "PB15:I2S2_SDO" : 5, "PB15:OTG_HS_DP" : 12, "PB15:RTC_REFIN" : 0, "PB15:SDMMC2_D1" : 9, "PB15:SPI2_MOSI" : 5, "PB15:TIM12_CH2" : 2, "PB15:TIM1_CH3N" : 1, "PB15:TIM8_CH3N" : 3, "PB15:UART4_CTS" : 8, "PB15:USART1_RX" : 4, "PC0:DFSDM_CKIN0" : 3, "PC0:DFSDM_DATIN4" : 6, "PC0:EVENT-OUT" : 15, "PC0:FMC_SDNWE" : 12, "PC0:LCD_R5" : 14, "PC0:OTG_HS_ULPI_STP" : 10, "PC0:SAI2_FS_B" : 8, "PC1:DFSDM_CKIN4" : 4, "PC1:DFSDM_DATIN0" : 3, "PC1:ETH_MDC" : 11, "PC1:EVENT-OUT" : 15, "PC1:I2S2_SDO" : 5, "PC1:MDIOS_MDC" : 12, "PC1:SAI1_D1" : 2, "PC1:SAI1_SD_A" : 6, "PC1:SAI4_D1" : 10, "PC1:SAI4_SD_A" : 8, "PC1:SDMMC2_CK" : 9, "PC1:SPI2_MOSI" : 5, "PC1:TRACED0" : 0, "PC2:DFSDM_CKIN1" : 3, "PC2:DFSDM_CKOUT" : 6, "PC2:ETH_MII_TXD2" : 11, "PC2:EVENT-OUT" : 15, "PC2:FMC_SDNE0" : 12, "PC2:I2S2_SDI" : 5, "PC2:OTG_HS_ULPI_DIR" : 10, "PC2:SPI2_MISO" : 5, "PC3:DFSDM_DATIN1" : 3, "PC3:ETH_MII_TX_CLK" : 11, "PC3:EVENT-OUT" : 15, "PC3:FMC_SDCKE0" : 12, "PC3:I2S2_SDO" : 5, "PC3:OTG_HS_ULPI_NXT" : 10, "PC3:SPI2_MOSI" : 5, "PC4:DFSDM_CKIN2" : 3, "PC4:ETH_MII_RXD0" : 11, "PC4:ETH_RMII_RXD0" : 11, "PC4:EVENT-OUT" : 15, "PC4:FMC_SDNE0" : 12, "PC4:I2S1_MCK" : 5, "PC4:SPDIFRX_IN2" : 9, "PC5:COMP_1_OUT" : 13, "PC5:DFSDM_DATIN2" : 3, "PC5:ETH_MII_RXD1" : 11, "PC5:ETH_RMII_RXD1" : 11, "PC5:EVENT-OUT" : 15, "PC5:FMC_SDCKE0" : 12, "PC5:SAI1_D3" : 2, "PC5:SAI4_D3" : 10, "PC5:SPDIFRX_IN3" : 9, "PC6:DCMI_D0" : 13, "PC6:DFSDM_CKIN3" : 4, "PC6:EVENT-OUT" : 15, "PC6:FMC_NWAIT" : 9, "PC6:HRTIM_CHA1" : 1, "PC6:I2S2_MCK" : 5, "PC6:LCD_HSYNC" : 14, "PC6:SDMMC1_D0DIR" : 8, "PC6:SDMMC1_D6" : 12, "PC6:SDMMC2_D6" : 10, "PC6:TIM3_CH1" : 2, "PC6:TIM8_CH1" : 3, "PC6:USART6_TX" : 7, "PC7:DCMI_D1" : 13, "PC7:DFSDM_DATIN3" : 4, "PC7:EVENT-OUT" : 15, "PC7:FMC_NE1" : 9, "PC7:HRTIM_CHA2" : 1, "PC7:I2S3_MCK" : 6, "PC7:LCD_G6" : 14, "PC7:SDMMC1_D123DIR" : 8, "PC7:SDMMC1_D7" : 
12, "PC7:SDMMC2_D7" : 10, "PC7:SWPMI_TX" : 11, "PC7:TIM3_CH2" : 2, "PC7:TIM8_CH2" : 3, "PC7:TRGIO" : 0, "PC7:USART6_RX" : 7, "PC8:DCMI_D2" : 13, "PC8:EVENT-OUT" : 15, "PC8:FMC_NCE" : 9, "PC8:FMC_NE2" : 9, "PC8:HRTIM_CHB1" : 1, "PC8:SDMMC1_D0" : 12, "PC8:SWPMI_RX" : 11, "PC8:TIM3_CH3" : 2, "PC8:TIM8_CH3" : 3, "PC8:TRACED1" : 0, "PC8:UART5_RTS" : 8, "PC8:USART6_CK" : 7, "PC9:DCMI_D3" : 13, "PC9:EVENT-OUT" : 15, "PC9:I2C3_SDA" : 4, "PC9:I2S_CKIN" : 5, "PC9:LCD_B2" : 14, "PC9:LCD_G3" : 10, "PC9:MCO2" : 0, "PC9:QUADSPI_BK1_IO0" : 9, "PC9:SDMMC1_D1" : 12, "PC9:SWPMI_SUSPEND" : 11, "PC9:TIM3_CH4" : 2, "PC9:TIM8_CH4" : 3, "PC9:UART5_CTS" : 8, "PC10:DCMI_D8" : 13, "PC10:DFSDM_CKIN5" : 3, "PC10:EVENT-OUT" : 15, "PC10:HRTIM_EEV1" : 2, "PC10:I2S3_CK" : 6, "PC10:LCD_R2" : 14, "PC10:QUADSPI_BK1_IO1" : 9, "PC10:SDMMC1_D2" : 12, "PC10:SPI3_SCK" : 6, "PC10:UART4_TX" : 8, "PC10:USART3_TX" : 7, "PC11:DCMI_D4" : 13, "PC11:DFSDM_DATIN5" : 3, "PC11:EVENT-OUT" : 15, "PC11:HRTIM_FLT2" : 2, "PC11:I2S3_SDI" : 6, "PC11:QUADSPI_BK2_NCS" : 9, "PC11:SDMMC1_D3" : 12, "PC11:SPI3_MISO" : 6, "PC11:UART4_RX" : 8, "PC11:USART3_RX" : 7, "PC12:DCMI_D9" : 13, "PC12:EVENT-OUT" : 15, "PC12:HRTIM_EEV2" : 2, "PC12:I2S3_SDO" : 6, "PC12:SDMMC1_CK" : 12, "PC12:SPI3_MOSI" : 6, "PC12:TRACED3" : 0, "PC12:UART5_TX" : 8, "PC12:USART3_CK" : 7, "PC13:EVENT-OUT" : 15, "PC14:EVENT-OUT" : 15, "PC15:EVENT-OUT" : 15, "PD0:DFSDM_CKIN6" : 3, "PD0:EVENT-OUT" : 15, "PD0:CAN1_RX" : 9, "PD0:FMC_D2" : 12, "PD0:FMC_DA2" : 12, "PD0:SAI3_SCK_A" : 6, "PD0:UART4_RX" : 8, "PD1:DFSDM_DATIN6" : 3, "PD1:EVENT-OUT" : 15, "PD1:CAN1_TX" : 9, "PD1:FMC_D3" : 12, "PD1:FMC_DA3" : 12, "PD1:SAI3_SD_A" : 6, "PD1:UART4_TX" : 8, "PD2:DCMI_D11" : 13, "PD2:EVENT-OUT" : 15, "PD2:SDMMC1_CMD" : 12, "PD2:TIM3_ETR" : 2, "PD2:TRACED2" : 0, "PD2:UART5_RX" : 8, "PD3:DCMI_D5" : 13, "PD3:DFSDM_CKOUT" : 3, "PD3:EVENT-OUT" : 15, "PD3:FMC_CLK" : 12, "PD3:I2S2_CK" : 5, "PD3:LCD_G7" : 14, "PD3:SPI2_SCK" : 5, "PD3:USART2_CTS" : 7, "PD4:EVENT-OUT" : 15, "PD4:CAN1_RX" 
: 9, "PD4:FMC_NOE" : 12, "PD4:HRTIM_FLT3" : 2, "PD4:SAI3_FS_A" : 6, "PD4:USART2_RTS" : 7, "PD5:EVENT-OUT" : 15, "PD5:CAN1_TX" : 9, "PD5:FMC_NWE" : 12, "PD5:HRTIM_EEV3" : 2, "PD5:USART2_TX" : 7, "PD6:DCMI_D10" : 13, "PD6:DFSDM_CKIN4" : 3, "PD6:DFSDM_DATIN1" : 4, "PD6:EVENT-OUT" : 15, "PD6:CAN2_RX" : 9, "PD6:FMC_NWAIT" : 12, "PD6:I2S3_SDO" : 5, "PD6:LCD_B2" : 14, "PD6:SAI1_D1" : 2, "PD6:SAI1_SD_A" : 6, "PD6:SAI4_D1" : 10, "PD6:SAI4_SD_A" : 8, "PD6:SDMMC2_CK" : 11, "PD6:SPI3_MOSI" : 5, "PD6:USART2_RX" : 7, "PD7:DFSDM_CKIN1" : 6, "PD7:DFSDM_DATIN4" : 3, "PD7:EVENT-OUT" : 15, "PD7:FMC_NE1" : 12, "PD7:I2S1_SDO" : 5, "PD7:SDMMC2_CMD" : 11, "PD7:SPDIFRX_IN0" : 9, "PD7:SPI1_MOSI" : 5, "PD7:USART2_CK" : 7, "PD8:DFSDM_CKIN3" : 3, "PD8:EVENT-OUT" : 15, "PD8:FMC_D13" : 12, "PD8:FMC_DA13" : 12, "PD8:SAI3_SCK_B" : 6, "PD8:SPDIFRX_IN1" : 9, "PD8:USART3_TX" : 7, "PD9:DFSDM_DATIN3" : 3, "PD9:EVENT-OUT" : 15, "PD9:CAN2_RX" : 9, "PD9:FMC_D14" : 12, "PD9:FMC_DA14" : 12, "PD9:SAI3_SD_B" : 6, "PD9:USART3_RX" : 7, "PD10:DFSDM_CKOUT" : 3, "PD10:EVENT-OUT" : 15, "PD10:CAN2_TX" : 9, "PD10:FMC_D15" : 12, "PD10:FMC_DA15" : 12, "PD10:LCD_B3" : 14, "PD10:SAI3_FS_B" : 6, "PD10:USART3_CK" : 7, "PD11:EVENT-OUT" : 15, "PD11:FMC_A16" : 12, "PD11:I2C4_SMBA" : 4, "PD11:LPTIM2_IN2" : 3, "PD11:QUADSPI_BK1_IO0" : 9, "PD11:SAI2_SD_A" : 10, "PD11:USART3_CTS" : 7, "PD12:EVENT-OUT" : 15, "PD12:FMC_A17" : 12, "PD12:I2C4_SCL" : 4, "PD12:LPTIM1_IN1" : 1, "PD12:LPTIM2_IN1" : 3, "PD12:QUADSPI_BK1_IO1" : 9, "PD12:SAI2_FS_A" : 10, "PD12:TIM4_CH1" : 2, "PD12:USART3_RTS" : 7, "PD13:EVENT-OUT" : 15, "PD13:FMC_A18" : 12, "PD13:I2C4_SDA" : 4, "PD13:LPTIM1_OUT" : 1, "PD13:QUADSPI_BK1_IO3" : 9, "PD13:SAI2_SCK_A" : 10, "PD13:TIM4_CH2" : 2, "PD14:EVENT-OUT" : 15, "PD14:FMC_D0" : 12, "PD14:FMC_DA0" : 12, "PD14:SAI3_MCLK_B" : 6, "PD14:TIM4_CH3" : 2, "PD14:UART8_CTS" : 8, "PD15:EVENT-OUT" : 15, "PD15:FMC_D1" : 12, "PD15:FMC_DA1" : 12, "PD15:SAI3_MCLK_A" : 6, "PD15:TIM4_CH4" : 2, "PD15:UART8_RTS" : 8, "PE0:DCMI_D2" : 13, 
"PE0:EVENT-OUT" : 15, "PE0:CAN1_RX" : 9, "PE0:FMC_NBL0" : 12, "PE0:HRTIM_SCIN" : 3, "PE0:LPTIM1_ETR" : 1, "PE0:LPTIM2_ETR" : 4, "PE0:SAI2_MCK_A" : 10, "PE0:TIM4_ETR" : 2, "PE0:UART8_RX" : 8, "PE1:DCMI_D3" : 13, "PE1:EVENT-OUT" : 15, "PE1:CAN1_TX" : 9, "PE1:FMC_NBL1" : 12, "PE1:HRTIM_SCOUT" : 3, "PE1:LPTIM1_IN2" : 1, "PE1:UART8_TX" : 8, "PE2:ETH_MII_TXD3" : 11, "PE2:EVENT-OUT" : 15, "PE2:FMC_A23" : 12, "PE2:QUADSPI_BK1_IO2" : 9, "PE2:SAI1_CK1" : 2, "PE2:SAI1_MCLK_A" : 6, "PE2:SAI4_CK1" : 10, "PE2:SAI4_MCLK_A" : 8, "PE2:SPI4_SCK" : 5, "PE2:TRACECLK" : 0, "PE3:EVENT-OUT" : 15, "PE3:FMC_A19" : 12, "PE3:SAI1_SD_B" : 6, "PE3:SAI4_SD_B" : 8, "PE3:TIM15_BKIN" : 4, "PE3:TRACED0" : 0, "PE4:DCMI_D4" : 13, "PE4:DFSDM_DATIN3" : 3, "PE4:EVENT-OUT" : 15, "PE4:FMC_A20" : 12, "PE4:LCD_B0" : 14, "PE4:SAI1_D2" : 2, "PE4:SAI1_FS_A" : 6, "PE4:SAI4_D2" : 10, "PE4:SAI4_FS_A" : 8, "PE4:SPI4_NSS" : 5, "PE4:TIM15_CH1N" : 4, "PE4:TRACED1" : 0, "PE5:DCMI_D6" : 13, "PE5:DFSDM_CKIN3" : 3, "PE5:EVENT-OUT" : 15, "PE5:FMC_A21" : 12, "PE5:LCD_G0" : 14, "PE5:SAI1_CK2" : 2, "PE5:SAI1_SCK_A" : 6, "PE5:SAI4_CK2" : 10, "PE5:SAI4_SCK_A" : 8, "PE5:SPI4_MISO" : 5, "PE5:TIM15_CH1" : 4, "PE5:TRACED2" : 0, "PE6:DCMI_D7" : 13, "PE6:EVENT-OUT" : 15, "PE6:FMC_A22" : 12, "PE6:LCD_G1" : 14, "PE6:SAI1_D1" : 2, "PE6:SAI1_SD_A" : 6, "PE6:SAI2_MCK_B" : 10, "PE6:SAI4_D1" : 9, "PE6:SAI4_SD_A" : 8, "PE6:SPI4_MOSI" : 5, "PE6:TIM15_CH2" : 4, "PE6:TIM1_BKIN2" : 1, "PE6:TIM1_BKIN2_COMP12" : 11, "PE6:TRACED3" : 0, "PE7:DFSDM_DATIN2" : 3, "PE7:EVENT-OUT" : 15, "PE7:FMC_D4" : 12, "PE7:FMC_DA4" : 12, "PE7:QUADSPI_BK2_IO0" : 10, "PE7:TIM1_ETR" : 1, "PE7:UART7_RX" : 7, "PE8:COMP_2_OUT" : 13, "PE8:DFSDM_CKIN2" : 3, "PE8:EVENT-OUT" : 15, "PE8:FMC_D5" : 12, "PE8:FMC_DA5" : 12, "PE8:QUADSPI_BK2_IO1" : 10, "PE8:TIM1_CH1N" : 1, "PE8:UART7_TX" : 7, "PE9:DFSDM_CKOUT" : 3, "PE9:EVENT-OUT" : 15, "PE9:FMC_D6" : 12, "PE9:FMC_DA6" : 12, "PE9:QUADSPI_BK2_IO2" : 10, "PE9:TIM1_CH1" : 1, "PE9:UART7_RTS" : 7, "PE10:DFSDM_DATIN4" : 3, 
"PE10:EVENT-OUT" : 15, "PE10:FMC_D7" : 12, "PE10:FMC_DA7" : 12, "PE10:QUADSPI_BK2_IO3" : 10, "PE10:TIM1_CH2N" : 1, "PE10:UART7_CTS" : 7, "PE11:DFSDM_CKIN4" : 3, "PE11:EVENT-OUT" : 15, "PE11:FMC_D8" : 12, "PE11:FMC_DA8" : 12, "PE11:LCD_G3" : 14, "PE11:SAI2_SD_B" : 10, "PE11:SPI4_NSS" : 5, "PE11:TIM1_CH2" : 1, "PE12:COMP_1_OUT" : 13, "PE12:DFSDM_DATIN5" : 3, "PE12:EVENT-OUT" : 15, "PE12:FMC_D9" : 12, "PE12:FMC_DA9" : 12, "PE12:LCD_B4" : 14, "PE12:SAI2_SCK_B" : 10, "PE12:SPI4_SCK" : 5, "PE12:TIM1_CH3N" : 1, "PE13:COMP_2_OUT" : 13, "PE13:DFSDM_CKIN5" : 3, "PE13:EVENT-OUT" : 15, "PE13:FMC_D10" : 12, "PE13:FMC_DA10" : 12, "PE13:LCD_DE" : 14, "PE13:SAI2_FS_B" : 10, "PE13:SPI4_MISO" : 5, "PE13:TIM1_CH3" : 1, "PE14:EVENT-OUT" : 15, "PE14:FMC_D11" : 12, "PE14:FMC_DA11" : 12, "PE14:LCD_CLK" : 14, "PE14:SAI2_MCK_B" : 10, "PE14:SPI4_MOSI" : 5, "PE14:TIM1_CH4" : 1, "PE15:EVENT-OUT" : 15, "PE15:FMC_D12" : 12, "PE15:FMC_DA12" : 12, "PE15:HDMI__TIM1_BKIN" : 5, "PE15:LCD_R7" : 14, "PE15:TIM1_BKIN" : 1, "PE15:TIM1_BKIN_COMP12" : 13, "PF0:EVENT-OUT" : 15, "PF0:FMC_A0" : 12, "PF0:I2C2_SDA" : 4, "PF1:EVENT-OUT" : 15, "PF1:FMC_A1" : 12, "PF1:I2C2_SCL" : 4, "PF2:EVENT-OUT" : 15, "PF2:FMC_A2" : 12, "PF2:I2C2_SMBA" : 4, "PF3:EVENT-OUT" : 15, "PF3:FMC_A3" : 12, "PF4:EVENT-OUT" : 15, "PF4:FMC_A4" : 12, "PF5:EVENT-OUT" : 15, "PF5:FMC_A5" : 12, "PF6:EVENT-OUT" : 15, "PF6:QUADSPI_BK1_IO3" : 9, "PF6:SAI1_SD_B" : 6, "PF6:SAI4_SD_B" : 8, "PF6:SPI5_NSS" : 5, "PF6:TIM16_CH1" : 1, "PF6:UART7_RX" : 7, "PF7:EVENT-OUT" : 15, "PF7:QUADSPI_BK1_IO2" : 9, "PF7:SAI1_MCLK_B" : 6, "PF7:SAI4_MCLK_B" : 8, "PF7:SPI5_SCK" : 5, "PF7:TIM17_CH1" : 1, "PF7:UART7_TX" : 7, "PF8:EVENT-OUT" : 15, "PF8:QUADSPI_BK1_IO0" : 10, "PF8:SAI1_SCK_B" : 6, "PF8:SAI4_SCK_B" : 8, "PF8:SPI5_MISO" : 5, "PF8:TIM13_CH1" : 9, "PF8:TIM16_CH1N" : 1, "PF8:UART7_RTS" : 7, "PF9:EVENT-OUT" : 15, "PF9:QUADSPI_BK1_IO1" : 10, "PF9:SAI1_FS_B" : 6, "PF9:SAI4_FS_B" : 8, "PF9:SPI5_MOSI" : 5, "PF9:TIM14_CH1" : 9, "PF9:TIM17_CH1N" : 1, "PF9:UART7_CTS" : 
7, "PF10:DCMI_D11" : 13, "PF10:EVENT-OUT" : 15, "PF10:LCD_DE" : 14, "PF10:QUADSPI_CLK" : 9, "PF10:SAI1_D3" : 2, "PF10:SAI4_D3" : 10, "PF10:TIM16_BKIN" : 1, "PF11:DCMI_D12" : 13, "PF11:EVENT-OUT" : 15, "PF11:FMC_SDNRAS" : 12, "PF11:SAI2_SD_B" : 10, "PF11:SPI5_MOSI" : 5, "PF12:EVENT-OUT" : 15, "PF12:FMC_A6" : 12, "PF13:DFSDM_DATIN6" : 3, "PF13:EVENT-OUT" : 15, "PF13:FMC_A7" : 12, "PF13:I2C4_SMBA" : 4, "PF14:DFSDM_CKIN6" : 3, "PF14:EVENT-OUT" : 15, "PF14:FMC_A8" : 12, "PF14:I2C4_SCL" : 4, "PF15:EVENT-OUT" : 15, "PF15:FMC_A9" : 12, "PF15:I2C4_SDA" : 4, "PG0:EVENT-OUT" : 15, "PG0:FMC_A10" : 12, "PG1:EVENT-OUT" : 15, "PG1:FMC_A11" : 12, "PG2:EVENT-OUT" : 15, "PG2:FMC_A12" : 12, "PG2:TIM8_BKIN" : 3, "PG2:TIM8_BKIN_COMP12" : 11, "PG3:EVENT-OUT" : 15, "PG3:FMC_A13" : 12, "PG3:TIM8_BKIN2" : 3, "PG3:TIM8_BKIN2_COMP12" : 11, "PG4:EVENT-OUT" : 15, "PG4:FMC_A14" : 12, "PG4:FMC_BA0" : 12, "PG4:TIM1_BKIN2" : 1, "PG4:TIM1_BKIN2_COMP12" : 11, "PG5:EVENT-OUT" : 15, "PG5:FMC_A15" : 12, "PG5:FMC_BA1" : 12, "PG5:TIM1_ETR" : 1, "PG6:DCMI_D12" : 13, "PG6:EVENT-OUT" : 15, "PG6:FMC_NE3" : 12, "PG6:HRTIM_CHE1" : 2, "PG6:LCD_R7" : 14, "PG6:QUADSPI_BK1_NCS" : 10, "PG6:TIM17_BKIN" : 1, "PG7:DCMI_D13" : 13, "PG7:EVENT-OUT" : 15, "PG7:FMC_INT" : 12, "PG7:HRTIM_CHE2" : 2, "PG7:LCD_CLK" : 14, "PG7:SAI1_MCLK_A" : 6, "PG7:USART6_CK" : 7, "PG8:ETH_PPS_OUT" : 11, "PG8:EVENT-OUT" : 15, "PG8:FMC_SDCLK" : 12, "PG8:LCD_G7" : 14, "PG8:SPDIFRX_IN2" : 8, "PG8:SPI6_NSS" : 5, "PG8:TIM8_ETR" : 3, "PG8:USART6_RTS" : 7, "PG9:DCMI_VSYNC" : 13, "PG9:EVENT-OUT" : 15, "PG9:FMC_NCE" : 12, "PG9:FMC_NE2" : 12, "PG9:I2S1_SDI" : 5, "PG9:QUADSPI_BK2_IO2" : 9, "PG9:SAI2_FS_B" : 10, "PG9:SPDIFRX_IN3" : 8, "PG9:SPI1_MISO" : 5, "PG9:USART6_RX" : 7, "PG10:DCMI_D2" : 13, "PG10:EVENT-OUT" : 15, "PG10:FMC_NE3" : 12, "PG10:HRTIM_FLT5" : 2, "PG10:I2S1_WS" : 5, "PG10:LCD_B2" : 14, "PG10:LCD_G3" : 9, "PG10:SAI2_SD_B" : 10, "PG10:SPI1_NSS" : 5, "PG11:DCMI_D3" : 13, "PG11:ETH_MII_TX_EN" : 11, "PG11:ETH_RMII_TX_EN" : 11, "PG11:EVENT-OUT" 
: 15, "PG11:HRTIM_EEV4" : 2, "PG11:I2S1_CK" : 5, "PG11:LCD_B3" : 14, "PG11:SDMMC2_D2" : 10, "PG11:SPDIFRX_IN0" : 8, "PG11:SPI1_SCK" : 5, "PG12:ETH_MII_TXD1" : 11, "PG12:ETH_RMII_TXD1" : 11, "PG12:EVENT-OUT" : 15, "PG12:FMC_NE4" : 12, "PG12:HRTIM_EEV5" : 2, "PG12:LCD_B1" : 14, "PG12:LCD_B4" : 9, "PG12:LPTIM1_IN1" : 1, "PG12:SPDIFRX_IN1" : 8, "PG12:SPI6_MISO" : 5, "PG12:USART6_RTS" : 7, "PG13:ETH_MII_TXD0" : 11, "PG13:ETH_RMII_TXD0" : 11, "PG13:EVENT-OUT" : 15, "PG13:FMC_A24" : 12, "PG13:HRTIM_EEV10" : 2, "PG13:LCD_R0" : 14, "PG13:LPTIM1_OUT" : 1, "PG13:SPI6_SCK" : 5, "PG13:TRACED0" : 0, "PG13:USART6_CTS" : 7, "PG14:ETH_MII_TXD1" : 11, "PG14:ETH_RMII_TXD1" : 11, "PG14:EVENT-OUT" : 15, "PG14:FMC_A25" : 12, "PG14:LCD_B0" : 14, "PG14:LPTIM1_ETR" : 1, "PG14:QUADSPI_BK2_IO3" : 9, "PG14:SPI6_MOSI" : 5, "PG14:TRACED1" : 0, "PG14:USART6_TX" : 7, "PG15:DCMI_D13" : 13, "PG15:EVENT-OUT" : 15, "PG15:FMC_SDNCAS" : 12, "PG15:USART6_CTS" : 7, "PH0:EVENT-OUT" : 15, "PH1:EVENT-OUT" : 15, "PH2:ETH_MII_CRS" : 11, "PH2:EVENT-OUT" : 15, "PH2:FMC_SDCKE0" : 12, "PH2:LCD_R0" : 14, "PH2:LPTIM1_IN2" : 1, "PH2:QUADSPI_BK2_IO0" : 9, "PH2:SAI2_SCK_B" : 10, "PH3:ETH_MII_COL" : 11, "PH3:EVENT-OUT" : 15, "PH3:FMC_SDNE0" : 12, "PH3:LCD_R1" : 14, "PH3:QUADSPI_BK2_IO1" : 9, "PH3:SAI2_MCK_B" : 10, "PH4:EVENT-OUT" : 15, "PH4:I2C2_SCL" : 4, "PH4:LCD_G4" : 14, "PH4:LCD_G5" : 9, "PH4:OTG_HS_ULPI_NXT" : 10, "PH5:EVENT-OUT" : 15, "PH5:FMC_SDNWE" : 12, "PH5:I2C2_SDA" : 4, "PH5:SPI5_NSS" : 5, "PH6:DCMI_D8" : 13, "PH6:ETH_MII_RXD2" : 11, "PH6:EVENT-OUT" : 15, "PH6:FMC_SDNE1" : 12, "PH6:I2C2_SMBA" : 4, "PH6:SPI5_SCK" : 5, "PH6:TIM12_CH1" : 2, "PH7:DCMI_D9" : 13, "PH7:ETH_MII_RXD3" : 11, "PH7:EVENT-OUT" : 15, "PH7:FMC_SDCKE1" : 12, "PH7:I2C3_SCL" : 4, "PH7:SPI5_MISO" : 5, "PH8:DCMI_HSYNC" : 13, "PH8:EVENT-OUT" : 15, "PH8:FMC_D16" : 12, "PH8:I2C3_SDA" : 4, "PH8:LCD_R2" : 14, "PH8:TIM5_ETR" : 2, "PH9:DCMI_D0" : 13, "PH9:EVENT-OUT" : 15, "PH9:FMC_D17" : 12, "PH9:I2C3_SMBA" : 4, "PH9:LCD_R3" : 14, "PH9:TIM12_CH2" : 
2, "PH10:DCMI_D1" : 13, "PH10:EVENT-OUT" : 15, "PH10:FMC_D18" : 12, "PH10:I2C4_SMBA" : 4, "PH10:LCD_R4" : 14, "PH10:TIM5_CH1" : 2, "PH11:DCMI_D2" : 13, "PH11:EVENT-OUT" : 15, "PH11:FMC_D19" : 12, "PH11:I2C4_SCL" : 4, "PH11:LCD_R5" : 14, "PH11:TIM5_CH2" : 2, "PH12:DCMI_D3" : 13, "PH12:EVENT-OUT" : 15, "PH12:FMC_D20" : 12, "PH12:I2C4_SDA" : 4, "PH12:LCD_R6" : 14, "PH12:TIM5_CH3" : 2, "PH13:EVENT-OUT" : 15, "PH13:CAN1_TX" : 9, "PH13:FMC_D21" : 12, "PH13:LCD_G2" : 14, "PH13:TIM8_CH1N" : 3, "PH13:UART4_TX" : 8, "PH14:DCMI_D4" : 13, "PH14:EVENT-OUT" : 15, "PH14:CAN1_RX" : 9, "PH14:FMC_D22" : 12, "PH14:LCD_G3" : 14, "PH14:TIM8_UCH2N" : 3, "PH14:UART4_RX" : 8, "PH15:DCMI_D11" : 13, "PH15:EVENT-OUT" : 15, "PH15:CAN1_TX" : 9, "PH15:FMC_D23" : 12, "PH15:LCD_G4" : 14, "PH15:TIM8_CH3N" : 3, "PI0:DCMI_D13" : 13, "PI0:EVENT-OUT" : 15, "PI0:CAN1_RX" : 9, "PI0:FMC_D24" : 12, "PI0:I2S2_WS" : 5, "PI0:LCD_G5" : 14, "PI0:SPI2_NSS" : 5, "PI0:TIM5_CH4" : 2, "PI1:DCMI_D8" : 13, "PI1:EVENT-OUT" : 15, "PI1:FMC_D25" : 12, "PI1:I2S2_CK" : 5, "PI1:LCD_G6" : 14, "PI1:SPI2_SCK" : 5, "PI1:TIM8_BKIN2" : 3, "PI1:TIM8_BKIN2_COMP12" : 11, "PI2:DCMI_D9" : 13, "PI2:EVENT-OUT" : 15, "PI2:FMC_D26" : 12, "PI2:I2S2_SDI" : 5, "PI2:LCD_G7" : 14, "PI2:SPI2_MISO" : 5, "PI2:TIM8_CH4" : 3, "PI3:DCMI_D10" : 13, "PI3:EVENT-OUT" : 15, "PI3:FMC_D27" : 12, "PI3:I2S2_SDO" : 5, "PI3:SPI2_MOSI" : 5, "PI3:TIM8_ETR" : 3, "PI4:DCMI_D5" : 13, "PI4:EVENT-OUT" : 15, "PI4:FMC_NBL2" : 12, "PI4:LCD_B4" : 14, "PI4:SAI2_MCK_A" : 10, "PI4:TIM8_BKIN" : 3, "PI4:TIM8_BKIN_COMP12" : 11, "PI5:DCMI_VSYNC" : 13, "PI5:EVENT-OUT" : 15, "PI5:FMC_NBL3" : 12, "PI5:LCD_B5" : 14, "PI5:SAI2_SCK_A" : 10, "PI5:TIM8_CH1" : 3, "PI6:DCMI_D6" : 13, "PI6:EVENT-OUT" : 15, "PI6:FMC_D28" : 12, "PI6:LCD_B6" : 14, "PI6:SAI2_SD_A" : 10, "PI6:TIM8_CH2" : 3, "PI7:DCMI_D7" : 13, "PI7:EVENT-OUT" : 15, "PI7:FMC_D29" : 12, "PI7:LCD_B7" : 14, "PI7:SAI2_FS_A" : 10, "PI7:TIM8_CH3" : 3, "PI8:EVENT-OUT" : 15, "PI9:EVENT-OUT" : 15, "PI9:CAN1_RX" : 9, "PI9:FMC_D30" : 12, 
"PI9:LCD_VSYNC" : 14, "PI9:UART4_RX" : 8, "PI10:ETH_MII_RX_ER" : 11, "PI10:EVENT-OUT" : 15, "PI10:CAN1_RX" : 9, "PI10:FMC_D31" : 12, "PI10:LCD_HSYNC" : 14, "PI11:EVENT-OUT" : 15, "PI11:LCD_G6" : 9, "PI11:OTG_HS_ULPI_DIR" : 10, "PI12:EVENT-OUT" : 15, "PI12:LCD_HSYNC" : 14, "PI13:EVENT-OUT" : 15, "PI13:LCD_VSYNC" : 14, "PI14:EVENT-OUT" : 15, "PI14:LCD_CLK" : 14, "PI15:EVENT-OUT" : 15, "PI15:LCD_G2" : 9, "PI15:LCD_R0" : 14, "PJ0:EVENT-OUT" : 15, "PJ0:LCD_R1" : 14, "PJ0:LCD_R7" : 9, "PJ1:EVENT-OUT" : 15, "PJ1:LCD_R2" : 14, "PJ2:EVENT-OUT" : 15, "PJ2:LCD_R3" : 14, "PJ3:EVENT-OUT" : 15, "PJ3:LCD_R4" : 14, "PJ4:EVENT-OUT" : 15, "PJ4:LCD_R5" : 14, "PJ5:EVENT-OUT" : 15, "PJ5:LCD_R6" : 14, "PJ6:EVENT-OUT" : 15, "PJ6:LCD_R7" : 14, "PJ6:TIM8_CH2" : 3, "PJ7:EVENT-OUT" : 15, "PJ7:LCD_G0" : 14, "PJ7:TIM8_CH2N" : 3, "PJ7:TRGIN" : 0, "PJ8:EVENT-OUT" : 15, "PJ8:LCD_G1" : 14, "PJ8:TIM1_CH3N" : 1, "PJ8:TIM8_CH1" : 3, "PJ8:UART8_TX" : 8, "PJ9:EVENT-OUT" : 15, "PJ9:LCD_G2" : 14, "PJ9:TIM1_CH3" : 1, "PJ9:TIM8_CH1N" : 3, "PJ9:UART8_RX" : 8, "PJ10:EVENT-OUT" : 15, "PJ10:LCD_G3" : 14, "PJ10:SPI5_MOSI" : 5, "PJ10:TIM1_CH2N" : 1, "PJ10:TIM8_CH2" : 3, "PJ11:EVENT-OUT" : 15, "PJ11:LCD_G4" : 14, "PJ11:SPI5_MISO" : 5, "PJ11:TIM1_CH2" : 1, "PJ11:TIM8_CH2N" : 3, "PJ12:EVENT-OUT" : 15, "PJ12:LCD_B0" : 14, "PJ12:LCD_G3" : 9, "PJ12:TRGOUT" : 0, "PJ13:EVENT-OUT" : 15, "PJ13:LCD_B1" : 14, "PJ13:LCD_B4" : 9, "PJ14:EVENT-OUT" : 15, "PJ14:LCD_B2" : 14, "PJ15:EVENT-OUT" : 15, "PJ15:LCD_B3" : 14, "PK0:EVENT-OUT" : 15, "PK0:LCD_G5" : 14, "PK0:SPI5_SCK" : 5, "PK0:TIM1_CH1N" : 1, "PK0:TIM8_CH3" : 3, "PK1:EVENT-OUT" : 15, "PK1:LCD_G6" : 14, "PK1:SPI5_NSS" : 5, "PK1:TIM1_CH1" : 1, "PK1:TIM8_CH3N" : 3, "PK2:EVENT-OUT" : 15, "PK2:LCD_G7" : 14, "PK2:TIM1_BKIN" : 1, "PK2:TIM1_BKIN_COMP12" : 11, "PK2:TIM8_BKIN" : 3, "PK2:TIM8_BKIN_COMP12" : 10, "PK3:EVENT-OUT" : 15, "PK3:LCD_B4" : 14, "PK4:EVENT-OUT" : 15, "PK4:LCD_B5" : 14, "PK5:EVENT-OUT" : 15, "PK5:LCD_B6" : 14, "PK6:EVENT-OUT" : 15, "PK6:LCD_B7" : 14, 
"PK7:EVENT-OUT" : 15, "PK7:LCD_DE" : 14, } ADC1_map = { # format is PIN : ADC1_CHAN "PF11" : 2, "PA6" : 3, "PC4" : 4, "PB1" : 5, "PF12" : 6, "PA7" : 7, "PC5" : 8, "PB0" : 9, "PC0" : 10, "PC1" : 11, "PC2" : 12, "PC3" : 13, "PA2" : 14, "PA3" : 15, "PA0" : 16, "PA1" : 17, "PA4" : 18, "PA5" : 19, }
gpl-3.0
dcifuen/cloudbday
src/lib/gae_mini_profiler/instrumented_profiler.py
13
2781
"""CPU profiler that works by instrumenting all function calls (uses cProfile). This profiler provides detailed function timings for all function calls during a request. This is just a simple wrapper for cProfile with result formatting. See http://docs.python.org/2/library/profile.html for more. PRO: since every function call is instrumented, you'll be sure to see everything that goes on during a request. For code that doesn't have lots of deeply nested function calls, this can be the easiest and most accurate way to get an idea for which functions are taking lots of time. CON: overhead is added to each function call due to this instrumentation. If you're profiling code with deeply nested function calls or tight loops going over lots of function calls, this perf overhead will add up. """ import cProfile import pstats import StringIO import util class Profile(object): """Profiler that wraps cProfile for programmatic access and reporting.""" def __init__(self): self.c_profile = cProfile.Profile() def results(self): """Return cProfile results in a dictionary for template context.""" # Make sure nothing is printed to stdout output = StringIO.StringIO() stats = pstats.Stats(self.c_profile, stream=output) stats.sort_stats("cumulative") results = { "total_call_count": stats.total_calls, "total_time": util.seconds_fmt(stats.total_tt), "calls": [] } width, list_func_names = stats.get_print_list([80]) for func_name in list_func_names: primitive_call_count, total_call_count, total_time, cumulative_time, callers = stats.stats[func_name] func_desc = pstats.func_std_string(func_name) callers_names = map(lambda func_name: pstats.func_std_string(func_name), callers.keys()) callers_desc = map( lambda name: {"func_desc": name, "func_desc_short": util.short_method_fmt(name)}, callers_names) results["calls"].append({ "primitive_call_count": primitive_call_count, "total_call_count": total_call_count, "cumulative_time": util.seconds_fmt(cumulative_time, 2), "per_call_cumulative": 
util.seconds_fmt(cumulative_time / primitive_call_count, 2) if primitive_call_count else "", "func_desc": func_desc, "func_desc_short": util.short_method_fmt(func_desc), "callers_desc": callers_desc, }) output.close() return results def run(self, fxn): """Run function with cProfile enabled, saving results.""" return self.c_profile.runcall(lambda *args, **kwargs: fxn(), None, None)
mit
837468220/python-for-android
python-build/python-libs/gdata/src/gdata/apps/migration/__init__.py
168
8177
#!/usr/bin/python
#
# Copyright (C) 2008 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains objects used with Google Apps."""

__author__ = 'google-apps-apis@googlegroups.com'


import atom
import gdata


# XML namespaces which are often used in Google Apps entity.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'


class Rfc822Msg(atom.AtomBase):
  """The Migration rfc822Msg element."""

  _tag = 'rfc822Msg'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['encoding'] = 'encoding'

  def __init__(self, extension_elements=None,
               extension_attributes=None, text=None):
    self.text = text
    # Message payloads are always transferred base64-encoded.
    self.encoding = 'base64'
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def Rfc822MsgFromString(xml_string):
  """Parse in the Rfc822 message from the XML definition."""
  return atom.CreateClassFromXMLString(Rfc822Msg, xml_string)


class MailItemProperty(atom.AtomBase):
  """The Migration mailItemProperty element."""

  _tag = 'mailItemProperty'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.value = value
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def MailItemPropertyFromString(xml_string):
  """Parse in the MailItemProperty from the XML definition."""
  return atom.CreateClassFromXMLString(MailItemProperty, xml_string)


class Label(atom.AtomBase):
  """The Migration label element."""

  _tag = 'label'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['labelName'] = 'label_name'

  def __init__(self, label_name=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.label_name = label_name
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def LabelFromString(xml_string):
  """Parse in the Label from the XML definition."""
  return atom.CreateClassFromXMLString(Label, xml_string)


class MailEntry(gdata.GDataEntry):
  """A Google Migration flavor of an Atom Entry."""

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
  _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
                                                        [MailItemProperty])
  _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               rfc822_msg=None, mail_item_property=None, label=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated)
    self.rfc822_msg = rfc822_msg
    self.mail_item_property = mail_item_property
    self.label = label
    self.extended_property = extended_property or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def MailEntryFromString(xml_string):
  """Parse in the MailEntry from the XML definition."""
  return atom.CreateClassFromXMLString(MailEntry, xml_string)


class BatchMailEntry(gdata.BatchEntry):
  """A Google Migration flavor of an Atom Entry."""

  _tag = gdata.BatchEntry._tag
  _namespace = gdata.BatchEntry._namespace
  _children = gdata.BatchEntry._children.copy()
  _attributes = gdata.BatchEntry._attributes.copy()
  _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
  _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
                                                        [MailItemProperty])
  _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               rfc822_msg=None, mail_item_property=None, label=None,
               batch_operation=None, batch_id=None, batch_status=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.BatchEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published,
                              batch_operation=batch_operation,
                              batch_id=batch_id, batch_status=batch_status,
                              title=title, updated=updated)
    self.rfc822_msg = rfc822_msg or None
    self.mail_item_property = mail_item_property or []
    self.label = label or []
    self.extended_property = extended_property or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def BatchMailEntryFromString(xml_string):
  """Parse in the BatchMailEntry from the XML definition."""
  return atom.CreateClassFromXMLString(BatchMailEntry, xml_string)


class BatchMailEventFeed(gdata.BatchFeed):
  """A Migration event feed flavor of an Atom Feed."""

  _tag = gdata.BatchFeed._tag
  _namespace = gdata.BatchFeed._namespace
  _children = gdata.BatchFeed._children.copy()
  _attributes = gdata.BatchFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, interrupted=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.BatchFeed.__init__(self, author=author, category=category,
                             contributor=contributor, generator=generator,
                             icon=icon, atom_id=atom_id, link=link, logo=logo,
                             rights=rights, subtitle=subtitle, title=title,
                             updated=updated, entry=entry,
                             total_results=total_results,
                             start_index=start_index,
                             items_per_page=items_per_page,
                             interrupted=interrupted,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes,
                             text=text)


def BatchMailEventFeedFromString(xml_string):
  """Parse in the BatchMailEventFeed from the XML definition."""
  return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string)
apache-2.0
eayunstack/nova
nova/virt/xenapi/agent.py
13
17806
# Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import binascii from distutils import version import os import sys import time import uuid from oslo.config import cfg from nova.api.metadata import password from nova.compute import utils as compute_utils from nova import context from nova import crypto from nova import exception from nova.i18n import _ from nova import objects from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import utils USE_AGENT_KEY = "xenapi_use_agent" USE_AGENT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + USE_AGENT_KEY SKIP_SSH_KEY = "xenapi_skip_agent_inject_ssh" SKIP_SSH_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + SKIP_SSH_KEY SKIP_FILES_AT_BOOT_KEY = "xenapi_skip_agent_inject_files_at_boot" SKIP_FILES_AT_BOOT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX \ + SKIP_FILES_AT_BOOT_KEY LOG = logging.getLogger(__name__) xenapi_agent_opts = [ cfg.IntOpt('agent_timeout', default=30, help='Number of seconds to wait for agent reply'), cfg.IntOpt('agent_version_timeout', default=300, help='Number of seconds to wait for agent ' 'to be fully operational'), cfg.IntOpt('agent_resetnetwork_timeout', default=60, help='Number of seconds to wait for agent reply ' 'to resetnetwork request'), cfg.StrOpt('agent_path', default='usr/sbin/xe-update-networking', help='Specifies the path in which the XenAPI guest 
agent ' 'should be located. If the agent is present, network ' 'configuration is not injected into the image. ' 'Used if compute_driver=xenapi.XenAPIDriver and ' 'flat_injected=True'), cfg.BoolOpt('disable_agent', default=False, help='Disables the use of the XenAPI agent in any image ' 'regardless of what image properties are present.'), cfg.BoolOpt('use_agent_default', default=False, help='Determines if the XenAPI agent should be used when ' 'the image used does not contain a hint to declare if ' 'the agent is present or not. ' 'The hint is a glance property "' + USE_AGENT_KEY + '" ' 'that has the value "True" or "False". ' 'Note that waiting for the agent when it is not present ' 'will significantly increase server boot times.'), ] CONF = cfg.CONF CONF.register_opts(xenapi_agent_opts, 'xenserver') def _call_agent(session, instance, vm_ref, method, addl_args=None, timeout=None, success_codes=None): """Abstracts out the interaction with the agent xenapi plugin.""" if addl_args is None: addl_args = {} if timeout is None: timeout = CONF.xenserver.agent_timeout if success_codes is None: success_codes = ['0'] # always fetch domid because VM may have rebooted dom_id = session.VM.get_domid(vm_ref) args = { 'id': str(uuid.uuid4()), 'dom_id': str(dom_id), 'timeout': str(timeout), } args.update(addl_args) try: ret = session.call_plugin('agent', method, args) except session.XenAPI.Failure as e: err_msg = e.details[-1].splitlines()[-1] if 'TIMEOUT:' in err_msg: LOG.error(_('TIMEOUT: The call to %(method)s timed out. ' 'args=%(args)r'), {'method': method, 'args': args}, instance=instance) raise exception.AgentTimeout(method=method) elif 'NOT IMPLEMENTED:' in err_msg: LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not ' 'supported by the agent. args=%(args)r'), {'method': method, 'args': args}, instance=instance) raise exception.AgentNotImplemented(method=method) else: LOG.error(_('The call to %(method)s returned an error: %(e)s. 
' 'args=%(args)r'), {'method': method, 'args': args, 'e': e}, instance=instance) raise exception.AgentError(method=method) if not isinstance(ret, dict): try: ret = jsonutils.loads(ret) except TypeError: LOG.error(_('The agent call to %(method)s returned an invalid ' 'response: %(ret)r. args=%(args)r'), {'method': method, 'ret': ret, 'args': args}, instance=instance) raise exception.AgentError(method=method) if ret['returncode'] not in success_codes: LOG.error(_('The agent call to %(method)s returned an ' 'an error: %(ret)r. args=%(args)r'), {'method': method, 'ret': ret, 'args': args}, instance=instance) raise exception.AgentError(method=method) LOG.debug('The agent call to %(method)s was successful: ' '%(ret)r. args=%(args)r', {'method': method, 'ret': ret, 'args': args}, instance=instance) # Some old versions of the Windows agent have a trailing \\r\\n # (ie CRLF escaped) for some reason. Strip that off. return ret['message'].replace('\\r\\n', '') def is_upgrade_required(current_version, available_version): # NOTE(johngarbutt): agent version numbers are four part, # so we need to use the loose version to compare them current = version.LooseVersion(current_version) available = version.LooseVersion(available_version) return available > current class XenAPIBasedAgent(object): def __init__(self, session, virtapi, instance, vm_ref): self.session = session self.virtapi = virtapi self.instance = instance self.vm_ref = vm_ref def _add_instance_fault(self, error, exc_info): LOG.warning(_("Ignoring error while configuring instance with " "agent: %s") % error, instance=self.instance, exc_info=True) try: ctxt = context.get_admin_context() compute_utils.add_instance_fault_from_exc( ctxt, self.instance, error, exc_info=exc_info) except Exception: LOG.debug("Error setting instance fault.", exc_info=True) def _call_agent(self, method, addl_args=None, timeout=None, success_codes=None, ignore_errors=True): try: return _call_agent(self.session, self.instance, self.vm_ref, method, 
addl_args, timeout, success_codes) except exception.AgentError as error: if ignore_errors: self._add_instance_fault(error, sys.exc_info()) else: raise def get_version(self): LOG.debug('Querying agent version', instance=self.instance) # The agent can be slow to start for a variety of reasons. On Windows, # it will generally perform a setup process on first boot that can # take a couple of minutes and then reboot. On Linux, the system can # also take a while to boot. expiration = time.time() + CONF.xenserver.agent_version_timeout while True: try: # NOTE(johngarbutt): we can't use the xapi plugin # timeout, because the domid may change when # the server is rebooted return self._call_agent('version', ignore_errors=False) except exception.AgentError as error: if time.time() > expiration: self._add_instance_fault(error, sys.exc_info()) return def _get_expected_build(self): ctxt = context.get_admin_context() agent_build = objects.Agent.get_by_triple( ctxt, 'xen', self.instance['os_type'], self.instance['architecture']) if agent_build: LOG.debug('Latest agent build for %(hypervisor)s/%(os)s' '/%(architecture)s is %(version)s', { 'hypervisor': agent_build.hypervisor, 'os': agent_build.os, 'architecture': agent_build.architecture, 'version': agent_build.version}) else: LOG.debug('No agent build found for %(hypervisor)s/%(os)s' '/%(architecture)s', { 'hypervisor': 'xen', 'os': self.instance['os_type'], 'architecture': self.instance['architecture']}) return agent_build def update_if_needed(self, version): agent_build = self._get_expected_build() if version and agent_build and \ is_upgrade_required(version, agent_build['version']): LOG.debug('Updating agent to %s', agent_build['version'], instance=self.instance) self._perform_update(agent_build) else: LOG.debug('Skipping agent update.', instance=self.instance) def _perform_update(self, agent_build): args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']} try: self._call_agent('agentupdate', args) except 
exception.AgentError as exc: # Silently fail for agent upgrades LOG.warning(_("Unable to update the agent due " "to: %(exc)s") % dict(exc=exc), instance=self.instance) def _exchange_key_with_agent(self): dh = SimpleDH() args = {'pub': str(dh.get_public())} resp = self._call_agent('key_init', args, success_codes=['D0'], ignore_errors=False) agent_pub = int(resp) dh.compute_shared(agent_pub) return dh def _save_instance_password_if_sshkey_present(self, new_pass): sshkey = self.instance.get('key_data') if sshkey and sshkey.startswith("ssh-rsa"): ctxt = context.get_admin_context() enc = crypto.ssh_encrypt_text(sshkey, new_pass) self.instance.system_metadata.update( password.convert_password(ctxt, base64.b64encode(enc))) self.instance.save() def set_admin_password(self, new_pass): """Set the root/admin password on the VM instance. This is done via an agent running on the VM. Communication between nova and the agent is done via writing xenstore records. Since communication is done over the XenAPI RPC calls, we need to encrypt the password. We're using a simple Diffie-Hellman class instead of a more advanced library (such as M2Crypto) for compatibility with the agent code. """ LOG.debug('Setting admin password', instance=self.instance) try: dh = self._exchange_key_with_agent() except exception.AgentError as error: self._add_instance_fault(error, sys.exc_info()) return # Some old versions of Linux and Windows agent expect trailing \n # on password to work correctly. 
enc_pass = dh.encrypt(new_pass + '\n') args = {'enc_pass': enc_pass} self._call_agent('password', args) self._save_instance_password_if_sshkey_present(new_pass) def inject_ssh_key(self): sshkey = self.instance.get('key_data') if not sshkey: return if self.instance['os_type'] == 'windows': LOG.debug("Skipping setting of ssh key for Windows.", instance=self.instance) return if self._skip_ssh_key_inject(): LOG.debug("Skipping agent ssh key injection for this image.", instance=self.instance) return sshkey = str(sshkey) keyfile = '/root/.ssh/authorized_keys' key_data = ''.join([ '\n', '# The following ssh key was injected by Nova', '\n', sshkey.strip(), '\n', ]) return self.inject_file(keyfile, key_data) def inject_files(self, injected_files): if self._skip_inject_files_at_boot(): LOG.debug("Skipping agent file injection for this image.", instance=self.instance) else: for path, contents in injected_files: self.inject_file(path, contents) def inject_file(self, path, contents): LOG.debug('Injecting file path: %r', path, instance=self.instance) # Files/paths must be base64-encoded for transmission to agent b64_path = base64.b64encode(path) b64_contents = base64.b64encode(contents) args = {'b64_path': b64_path, 'b64_contents': b64_contents} return self._call_agent('inject_file', args) def resetnetwork(self): LOG.debug('Resetting network', instance=self.instance) # NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success return self._call_agent('resetnetwork', timeout=CONF.xenserver.agent_resetnetwork_timeout, success_codes=['0', '500']) def _skip_ssh_key_inject(self): return self._get_sys_meta_key(SKIP_SSH_SM_KEY) def _skip_inject_files_at_boot(self): return self._get_sys_meta_key(SKIP_FILES_AT_BOOT_SM_KEY) def _get_sys_meta_key(self, key): sys_meta = utils.instance_sys_meta(self.instance) raw_value = sys_meta.get(key, 'False') return strutils.bool_from_string(raw_value, strict=False) def find_guest_agent(base_dir): """tries to locate a guest agent at the path 
specified by agent_rel_path """ if CONF.xenserver.disable_agent: return False agent_rel_path = CONF.xenserver.agent_path agent_path = os.path.join(base_dir, agent_rel_path) if os.path.isfile(agent_path): # The presence of the guest agent # file indicates that this instance can # reconfigure the network from xenstore data, # so manipulation of files in /etc is not # required LOG.info(_('XenServer tools installed in this ' 'image are capable of network injection. ' 'Networking files will not be' 'manipulated')) return True xe_daemon_filename = os.path.join(base_dir, 'usr', 'sbin', 'xe-daemon') if os.path.isfile(xe_daemon_filename): LOG.info(_('XenServer tools are present ' 'in this image but are not capable ' 'of network injection')) else: LOG.info(_('XenServer tools are not ' 'installed in this image')) return False def should_use_agent(instance): sys_meta = utils.instance_sys_meta(instance) if USE_AGENT_SM_KEY not in sys_meta: return CONF.xenserver.use_agent_default else: use_agent_raw = sys_meta[USE_AGENT_SM_KEY] try: return strutils.bool_from_string(use_agent_raw, strict=True) except ValueError: LOG.warn(_("Invalid 'agent_present' value. " "Falling back to the default."), instance=instance) return CONF.xenserver.use_agent_default class SimpleDH(object): """This class wraps all the functionality needed to implement basic Diffie-Hellman-Merkle key exchange in Python. It features intelligent defaults for the prime and base numbers needed for the calculation, while allowing you to supply your own. It requires that the openssl binary be installed on the system on which this is run, as it uses that to handle the encryption and decryption. If openssl is not available, a RuntimeError will be raised. 
""" def __init__(self): self._prime = 162259276829213363391578010288127 self._base = 5 self._public = None self._shared = None self.generate_private() def generate_private(self): self._private = int(binascii.hexlify(os.urandom(10)), 16) return self._private def get_public(self): self._public = pow(self._base, self._private, self._prime) return self._public def compute_shared(self, other): self._shared = pow(other, self._private, self._prime) return self._shared def _run_ssl(self, text, decrypt=False): cmd = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass', 'pass:%s' % self._shared, '-nosalt'] if decrypt: cmd.append('-d') out, err = utils.execute(*cmd, process_input=text) if err: raise RuntimeError(_('OpenSSL error: %s') % err) return out def encrypt(self, text): return self._run_ssl(text).strip('\n') def decrypt(self, text): return self._run_ssl(text, decrypt=True)
apache-2.0
mapennell/ansible
v1/ansible/module_utils/ec2.py
127
7586
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
try: from distutils.version import LooseVersion HAS_LOOSE_VERSION = True except: HAS_LOOSE_VERSION = False def aws_common_argument_spec(): return dict( ec2_url=dict(), aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), validate_certs=dict(default=True, type='bool'), security_token=dict(aliases=['access_token'], no_log=True), profile=dict(), ) def ec2_argument_spec(): spec = aws_common_argument_spec() spec.update( dict( region=dict(aliases=['aws_region', 'ec2_region']), ) ) return spec def boto_supports_profile_name(): return hasattr(boto.ec2.EC2Connection, 'profile_name') def get_aws_connection_info(module): # Check module args for credentials, then check environment vars # access_key ec2_url = module.params.get('ec2_url') access_key = module.params.get('aws_access_key') secret_key = module.params.get('aws_secret_key') security_token = module.params.get('security_token') region = module.params.get('region') profile_name = module.params.get('profile') validate_certs = module.params.get('validate_certs') if not ec2_url: if 'AWS_URL' in os.environ: ec2_url = os.environ['AWS_URL'] elif 'EC2_URL' in os.environ: ec2_url = os.environ['EC2_URL'] if not access_key: if 'AWS_ACCESS_KEY_ID' in os.environ: access_key = os.environ['AWS_ACCESS_KEY_ID'] elif 'AWS_ACCESS_KEY' in os.environ: access_key = os.environ['AWS_ACCESS_KEY'] elif 'EC2_ACCESS_KEY' in os.environ: access_key = os.environ['EC2_ACCESS_KEY'] else: # in case access_key came in as empty string access_key = None if not secret_key: if 'AWS_SECRET_ACCESS_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] elif 'AWS_SECRET_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_KEY'] elif 'EC2_SECRET_KEY' in os.environ: secret_key = os.environ['EC2_SECRET_KEY'] else: # in case secret_key came in as empty string secret_key = None if not region: if 'AWS_REGION' in os.environ: region = os.environ['AWS_REGION'] elif 
'EC2_REGION' in os.environ: region = os.environ['EC2_REGION'] else: # boto.config.get returns None if config not found region = boto.config.get('Boto', 'aws_region') if not region: region = boto.config.get('Boto', 'ec2_region') if not security_token: if 'AWS_SECURITY_TOKEN' in os.environ: security_token = os.environ['AWS_SECURITY_TOKEN'] elif 'EC2_SECURITY_TOKEN' in os.environ: security_token = os.environ['EC2_SECURITY_TOKEN'] else: # in case security_token came in as empty string security_token = None boto_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key, security_token=security_token) # profile_name only works as a key in boto >= 2.24 # so only set profile_name if passed as an argument if profile_name: if not boto_supports_profile_name(): module.fail_json("boto does not support profile_name before 2.24") boto_params['profile_name'] = profile_name if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): boto_params['validate_certs'] = validate_certs return region, ec2_url, boto_params def get_ec2_creds(module): ''' for compatibility mode with old modules that don't/can't yet use ec2_connect method ''' region, ec2_url, boto_params = get_aws_connection_info(module) return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region def boto_fix_security_token_in_profile(conn, profile_name): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + profile_name if boto.config.has_option(profile, 'aws_security_token'): conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token')) return conn def connect_to_aws(aws_module, region, **params): conn = aws_module.connect_to_region(region, **params) if not conn: if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: raise StandardError("Region %s does not seem to be available for aws module %s. 
If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__)) else: raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__)) if params.get('profile_name'): conn = boto_fix_security_token_in_profile(conn, params['profile_name']) return conn def ec2_connect(module): """ Return an ec2 connection""" region, ec2_url, boto_params = get_aws_connection_info(module) # If we have a region specified, connect to its endpoint. if region: try: ec2 = connect_to_aws(boto.ec2, region, **boto_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) # Otherwise, no region so we fallback to the old connection method elif ec2_url: try: ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") return ec2
gpl-3.0
dennis-sheil/commandergenius
project/jni/python/src/Lib/test/test_structmembers.py
56
3373
from _testcapi import test_structmembersType, \ CHAR_MAX, CHAR_MIN, UCHAR_MAX, \ SHRT_MAX, SHRT_MIN, USHRT_MAX, \ INT_MAX, INT_MIN, UINT_MAX, \ LONG_MAX, LONG_MIN, ULONG_MAX, \ LLONG_MAX, LLONG_MIN, ULLONG_MAX import warnings, exceptions, unittest, sys from test import test_support ts=test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8, 9.99999, 10.1010101010) class ReadWriteTests(unittest.TestCase): def test_types(self): ts.T_BOOL = True self.assertEquals(ts.T_BOOL, True) ts.T_BOOL = False self.assertEquals(ts.T_BOOL, False) self.assertRaises(TypeError, setattr, ts, 'T_BOOL', 1) ts.T_BYTE = CHAR_MAX self.assertEquals(ts.T_BYTE, CHAR_MAX) ts.T_BYTE = CHAR_MIN self.assertEquals(ts.T_BYTE, CHAR_MIN) ts.T_UBYTE = UCHAR_MAX self.assertEquals(ts.T_UBYTE, UCHAR_MAX) ts.T_SHORT = SHRT_MAX self.assertEquals(ts.T_SHORT, SHRT_MAX) ts.T_SHORT = SHRT_MIN self.assertEquals(ts.T_SHORT, SHRT_MIN) ts.T_USHORT = USHRT_MAX self.assertEquals(ts.T_USHORT, USHRT_MAX) ts.T_INT = INT_MAX self.assertEquals(ts.T_INT, INT_MAX) ts.T_INT = INT_MIN self.assertEquals(ts.T_INT, INT_MIN) ts.T_UINT = UINT_MAX self.assertEquals(ts.T_UINT, UINT_MAX) ts.T_LONG = LONG_MAX self.assertEquals(ts.T_LONG, LONG_MAX) ts.T_LONG = LONG_MIN self.assertEquals(ts.T_LONG, LONG_MIN) ts.T_ULONG = ULONG_MAX self.assertEquals(ts.T_ULONG, ULONG_MAX) ## T_LONGLONG and T_ULONGLONG may not be present on some platforms if hasattr(ts, 'T_LONGLONG'): ts.T_LONGLONG = LLONG_MAX self.assertEquals(ts.T_LONGLONG, LLONG_MAX) ts.T_LONGLONG = LLONG_MIN self.assertEquals(ts.T_LONGLONG, LLONG_MIN) ts.T_ULONGLONG = ULLONG_MAX self.assertEquals(ts.T_ULONGLONG, ULLONG_MAX) ## make sure these will accept a plain int as well as a long ts.T_LONGLONG = 3 self.assertEquals(ts.T_LONGLONG, 3) ts.T_ULONGLONG = 4 self.assertEquals(ts.T_ULONGLONG, 4) class TestWarnings(unittest.TestCase): def has_warned(self, w): self.assertEqual(w.category, RuntimeWarning) def test_byte_max(self): with test_support.check_warnings() as w: ts.T_BYTE = CHAR_MAX+1 
self.has_warned(w) def test_byte_min(self): with test_support.check_warnings() as w: ts.T_BYTE = CHAR_MIN-1 self.has_warned(w) def test_ubyte_max(self): with test_support.check_warnings() as w: ts.T_UBYTE = UCHAR_MAX+1 self.has_warned(w) def test_short_max(self): with test_support.check_warnings() as w: ts.T_SHORT = SHRT_MAX+1 self.has_warned(w) def test_short_min(self): with test_support.check_warnings() as w: ts.T_SHORT = SHRT_MIN-1 self.has_warned(w) def test_ushort_max(self): with test_support.check_warnings() as w: ts.T_USHORT = USHRT_MAX+1 self.has_warned(w) def test_main(verbose=None): test_support.run_unittest(__name__) if __name__ == "__main__": test_main(verbose=True)
lgpl-2.1
livlogik/Evil_Yummy_Gumdrop--Tmo-V10-Kernel
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python """ Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com> Licensed under the terms of the GNU GPL License version 2 This script parses a trace provided by the function tracer in kernel/trace/trace_functions.c The resulted trace is processed into a tree to produce a more human view of the call stack by drawing textual but hierarchical tree of calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER # mount -t debugfs nodev /sys/kernel/debug # echo function > /sys/kernel/debug/tracing/current_tracer $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ import sys, re class CallTree: """ This class provides a tree representation of the functions call stack. If a function has no parent in the kernel (interrupt, syscall, kernel thread...) then it is attached to a virtual parent called ROOT. """ ROOT = None def __init__(self, func, time = None, parent = None): self._func = func self._time = time if parent is None: self._parent = CallTree.ROOT else: self._parent = parent self._children = [] def calls(self, func, calltime): """ If a function calls another one, call this method to insert it into the tree at the appropriate place. @return: A reference to the newly created child node. """ child = CallTree(func, calltime, self) self._children.append(child) return child def getParent(self, func): """ Retrieve the last parent of the current node that has the name given by func. If this function is not on a parent, then create it as new child of root @return: A reference to the parent. 
""" tree = self while tree != CallTree.ROOT and tree._func != func: tree = tree._parent if tree == CallTree.ROOT: child = CallTree.ROOT.calls(func, None) return child return tree def __repr__(self): return self.__toString("", True) def __toString(self, branch, lastChild): if self._time is not None: s = "%s----%s (%s)\n" % (branch, self._func, self._time) else: s = "%s----%s\n" % (branch, self._func) i = 0 if lastChild: branch = branch[:-1] + " " while i < len(self._children): if i != len(self._children) - 1: s += "%s" % self._children[i].__toString(branch +\ " |", False) else: s += "%s" % self._children[i].__toString(branch +\ " |", True) i += 1 return s class BrokenLineException(Exception): """If the last line is not complete because of the pipe breakage, we want to stop the processing and ignore this line. """ pass class CommentLineException(Exception): """ If the line is a comment (as in the beginning of the trace file), just ignore it. """ pass def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException return (m.group(1), m.group(2), m.group(3)) def main(): CallTree.ROOT = CallTree("Root (Nowhere)", None, None) tree = CallTree.ROOT for line in sys.stdin: try: calltime, callee, caller = parseLine(line) except BrokenLineException: break except CommentLineException: continue tree = tree.getParent(caller) tree = tree.calls(callee, calltime) print CallTree.ROOT if __name__ == "__main__": main()
gpl-2.0
malderete/ninja-ide
ninja_ide/gui/editor/checkers/migration_lists.py
6
6160
# -*- coding: utf-8 -*- # # This file is part of NINJA-IDE (http://ninja-ide.org). # # NINJA-IDE is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # any later version. # # NINJA-IDE is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import from PyQt4.QtGui import QDialog from PyQt4.QtGui import QListWidget from PyQt4.QtGui import QListWidgetItem from PyQt4.QtGui import QLabel from PyQt4.QtGui import QHBoxLayout from PyQt4.QtGui import QVBoxLayout from PyQt4.QtGui import QPushButton from PyQt4.QtGui import QSpacerItem from PyQt4.QtGui import QSizePolicy from PyQt4.QtGui import QPlainTextEdit from PyQt4.QtGui import QTextCursor from PyQt4.QtCore import Qt from PyQt4.QtCore import SIGNAL from ninja_ide import translations from ninja_ide.core import settings from ninja_ide.gui.ide import IDE from ninja_ide.gui.explorer.explorer_container import ExplorerContainer class MigrationWidget(QDialog): """2to3 Migration Assistance Widget Class""" def __init__(self, parent=None): super(MigrationWidget, self).__init__(parent, Qt.WindowStaysOnTopHint) self._migration, vbox, hbox = {}, QVBoxLayout(self), QHBoxLayout() lbl_title = QLabel(translations.TR_CURRENT_CODE) lbl_suggestion = QLabel(translations.TR_SUGGESTED_CHANGES) self.current_list, self.suggestion = QListWidget(), QPlainTextEdit() self.suggestion.setReadOnly(True) self.btn_apply = QPushButton(translations.TR_APPLY_CHANGES + " !") self.suggestion.setToolTip(translations.TR_SAVE_BEFORE_APPLY + " !") 
self.btn_apply.setToolTip(translations.TR_SAVE_BEFORE_APPLY + " !") # pack up all widgets hbox.addSpacerItem(QSpacerItem(1, 0, QSizePolicy.Expanding)) hbox.addWidget(self.btn_apply) vbox.addWidget(lbl_title) vbox.addWidget(self.current_list) vbox.addWidget(lbl_suggestion) vbox.addWidget(self.suggestion) vbox.addLayout(hbox) # connections self.connect( self.current_list, SIGNAL("itemClicked(QListWidgetItem*)"), self.load_suggestion) self.connect(self.btn_apply, SIGNAL("clicked()"), self.apply_changes) # registers IDE.register_service('tab_migration', self) ExplorerContainer.register_tab(translations.TR_TAB_MIGRATION, self) def install_tab(self): """Install the Tab on the IDE.""" ide = IDE.get_service('ide') self.connect(ide, SIGNAL("goingDown()"), self.close) def apply_changes(self): """Apply the suggested changes on the Python code.""" lineno = int(self.current_list.currentItem().data(Qt.UserRole)) lines = self._migration[lineno][0].split('\n') remove, code = -1, "" for line in lines: if line.startswith('-'): remove += 1 # line to remove elif line.startswith('+'): code += '{line_to_add}\n'.format(line_to_add=line[1:]) # get and apply changes on editor main_container = IDE.get_service('main_container') if main_container: editorWidget = main_container.get_current_editor() block_start = editorWidget.document().findBlockByLineNumber(lineno) block_end = editorWidget.document().findBlockByLineNumber(lineno + remove) cursor = editorWidget.textCursor() cursor.setPosition(block_start.position()) cursor.setPosition(block_end.position(), QTextCursor.KeepAnchor) cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor) cursor.insertText(code[:-1]) def load_suggestion(self, item): """Take an argument item and load the suggestion.""" lineno, code = int(item.data(Qt.UserRole)), "" lines = self._migration[lineno][0].split('\n') for line in lines: if line.startswith('+'): code += '{line_to_add}\n'.format(line_to_add=line[1:]) self.suggestion.setPlainText(code) 
main_container = IDE.get_service('main_container') if main_container: editorWidget = main_container.get_current_editor() if editorWidget: editorWidget.jump_to_line(lineno) editorWidget.setFocus() def refresh_lists(self, migration): """Refresh the list of code suggestions.""" self._migration, base_lineno = migration, -1 self.current_list.clear() for lineno in sorted(migration.keys()): linenostr = 'L{line_number}\n'.format(line_number=str(lineno + 1)) data = migration[lineno] lines = data[0].split('\n') if base_lineno == data[1]: continue base_lineno = data[1] message = '' for line in lines: if line.startswith('-'): message += '{line_to_load}\n'.format(line_to_load=line) item = QListWidgetItem(linenostr + message) item.setToolTip(linenostr + message) item.setData(Qt.UserRole, lineno) self.current_list.addItem(item) def clear(self): """Clear the widget.""" self.current_list.clear() self.suggestion.clear() def reject(self): """Reject""" if self.parent() is None: self.emit(SIGNAL("dockWidget(PyQt_PyObject)"), self) def closeEvent(self, event): """Close""" self.emit(SIGNAL("dockWidget(PyQt_PyObject)"), self) event.ignore() migrationWidget = MigrationWidget() if settings.SHOW_MIGRATION_LIST else None
gpl-3.0
pombredanne/teamwork
wsgi/static/Brython2.1.0-20140419-113919/Lib/signal.py
743
1646
"""This module provides mechanisms to use signal handlers in Python. Functions: alarm() -- cause SIGALRM after a specified time [Unix only] setitimer() -- cause a signal (described below) after a specified float time and the timer may restart then [Unix only] getitimer() -- get current value of timer [Unix only] signal() -- set the action for a given signal getsignal() -- get the signal action for a given signal pause() -- wait until a signal arrives [Unix only] default_int_handler() -- default SIGINT handler signal constants: SIG_DFL -- used to refer to the system default handler SIG_IGN -- used to ignore the signal NSIG -- number of defined signals SIGINT, SIGTERM, etc. -- signal numbers itimer constants: ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon expiration ITIMER_VIRTUAL -- decrements only when the process is executing, and delivers SIGVTALRM upon expiration ITIMER_PROF -- decrements both when the process is executing and when the system is executing on behalf of the process. Coupled with ITIMER_VIRTUAL, this timer is usually used to profile the time spent by the application in user and kernel space. SIGPROF is delivered upon expiration. *** IMPORTANT NOTICE *** A signal handler function is called with two arguments: the first is the signal number, the second is the interrupted stack frame.""" CTRL_BREAK_EVENT=1 CTRL_C_EVENT=0 NSIG=23 SIGABRT=22 SIGBREAK=21 SIGFPE=8 SIGILL=4 SIGINT=2 SIGSEGV=11 SIGTERM=15 SIG_DFL=0 SIG_IGN=1 def signal(signalnum, handler) : pass
gpl-2.0
marcsans/cnn-physics-perception
phy/lib/python2.7/site-packages/setuptools/command/install.py
529
4683
from distutils.errors import DistutilsArgError import inspect import glob import warnings import platform import distutils.command.install as orig import setuptools # Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for # now. See https://github.com/pypa/setuptools/issues/199/ _install = orig.install class install(orig.install): """Use easy_install to install the package, w/dependencies""" user_options = orig.install.user_options + [ ('old-and-unmanageable', None, "Try not to use this!"), ('single-version-externally-managed', None, "used by system package builders to create 'flat' eggs"), ] boolean_options = orig.install.boolean_options + [ 'old-and-unmanageable', 'single-version-externally-managed', ] new_commands = [ ('install_egg_info', lambda self: True), ('install_scripts', lambda self: True), ] _nc = dict(new_commands) def initialize_options(self): orig.install.initialize_options(self) self.old_and_unmanageable = None self.single_version_externally_managed = None def finalize_options(self): orig.install.finalize_options(self) if self.root: self.single_version_externally_managed = True elif self.single_version_externally_managed: if not self.root and not self.record: raise DistutilsArgError( "You must specify --record or --root when building system" " packages" ) def handle_extra_path(self): if self.root or self.single_version_externally_managed: # explicit backward-compatibility mode, allow extra_path to work return orig.install.handle_extra_path(self) # Ignore extra_path when installing an egg (or being run by another # command without --root or --single-version-externally-managed self.path_file = None self.extra_dirs = '' def run(self): # Explicit request for old-style install? Just do it if self.old_and_unmanageable or self.single_version_externally_managed: return orig.install.run(self) if not self._called_from_setup(inspect.currentframe()): # Run in backward-compatibility mode to support bdist_* commands. 
orig.install.run(self) else: self.do_egg_install() @staticmethod def _called_from_setup(run_frame): """ Attempt to detect whether run() was called from setup() or by another command. If called by setup(), the parent caller will be the 'run_command' method in 'distutils.dist', and *its* caller will be the 'run_commands' method. If called any other way, the immediate caller *might* be 'run_command', but it won't have been called by 'run_commands'. Return True in that case or if a call stack is unavailable. Return False otherwise. """ if run_frame is None: msg = "Call stack not available. bdist_* commands may fail." warnings.warn(msg) if platform.python_implementation() == 'IronPython': msg = "For best results, pass -X:Frames to enable call stack." warnings.warn(msg) return True res = inspect.getouterframes(run_frame)[2] caller, = res[:1] info = inspect.getframeinfo(caller) caller_module = caller.f_globals.get('__name__', '') return ( caller_module == 'distutils.dist' and info.function == 'run_commands' ) def do_egg_install(self): easy_install = self.distribution.get_command_class('easy_install') cmd = easy_install( self.distribution, args="x", root=self.root, record=self.record, ) cmd.ensure_finalized() # finalize before bdist_egg munges install cmd cmd.always_copy_from = '.' # make sure local-dir eggs get installed # pick up setup-dir .egg files only: no .egg-info cmd.package_index.scan(glob.glob('*.egg')) self.run_command('bdist_egg') args = [self.distribution.get_command_obj('bdist_egg').egg_output] if setuptools.bootstrap_install_from: # Bootstrap self-installation of setuptools args.insert(0, setuptools.bootstrap_install_from) cmd.args = args cmd.run() setuptools.bootstrap_install_from = None # XXX Python 3.1 doesn't see _nc if this is inside the class install.sub_commands = ( [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] + install.new_commands )
mit
ol-loginov/intellij-community
python/helpers/epydoc/markup/doctest.py
90
13069
# # doctest.py: Syntax Highlighting for doctest blocks # Edward Loper # # Created [06/28/03 02:52 AM] # $Id: restructuredtext.py 1210 2006-04-10 13:25:50Z edloper $ # """ Syntax highlighting for doctest blocks. This module defines two functions, L{doctest_to_html()} and L{doctest_to_latex()}, which can be used to perform syntax highlighting on doctest blocks. It also defines the more general C{colorize_doctest()}, which could be used to do syntac highlighting on doctest blocks with other output formats. (Both C{doctest_to_html()} and C{doctest_to_latex()} are defined using C{colorize_doctest()}.) """ __docformat__ = 'epytext en' import re from epydoc.util import plaintext_to_html, plaintext_to_latex __all__ = ['doctest_to_html', 'doctest_to_latex', 'DoctestColorizer', 'XMLDoctestColorizer', 'HTMLDoctestColorizer', 'LaTeXDoctestColorizer'] def doctest_to_html(s): """ Perform syntax highlighting on the given doctest string, and return the resulting HTML code. This code consists of a C{<pre>} block with class=py-doctest. Syntax highlighting is performed using the following css classes: - C{py-prompt} -- the Python PS1 prompt (>>>) - C{py-more} -- the Python PS2 prompt (...) - C{py-keyword} -- a Python keyword (for, if, etc.) - C{py-builtin} -- a Python builtin name (abs, dir, etc.) - C{py-string} -- a string literal - C{py-comment} -- a comment - C{py-except} -- an exception traceback (up to the next >>>) - C{py-output} -- the output from a doctest block. - C{py-defname} -- the name of a function or class defined by a C{def} or C{class} statement. """ return HTMLDoctestColorizer().colorize_doctest(s) def doctest_to_latex(s): """ Perform syntax highlighting on the given doctest string, and return the resulting LaTeX code. This code consists of an C{alltt} environment. Syntax highlighting is performed using the following new latex commands, which must be defined externally: - C{\pysrcprompt} -- the Python PS1 prompt (>>>) - C{\pysrcmore} -- the Python PS2 prompt (...) 
- C{\pysrckeyword} -- a Python keyword (for, if, etc.) - C{\pysrcbuiltin} -- a Python builtin name (abs, dir, etc.) - C{\pysrcstring} -- a string literal - C{\pysrccomment} -- a comment - C{\pysrcexcept} -- an exception traceback (up to the next >>>) - C{\pysrcoutput} -- the output from a doctest block. - C{\pysrcdefname} -- the name of a function or class defined by a C{def} or C{class} statement. """ return LaTeXDoctestColorizer().colorize_doctest(s) class DoctestColorizer: """ An abstract base class for performing syntax highlighting on doctest blocks and other bits of Python code. Subclasses should provide definitions for: - The L{markup()} method, which takes a substring and a tag, and returns a colorized version of the substring. - The L{PREFIX} and L{SUFFIX} variables, which will be added to the beginning and end of the strings returned by L{colorize_codeblock} and L{colorize_doctest}. """ #: A string that is added to the beginning of the strings #: returned by L{colorize_codeblock} and L{colorize_doctest}. #: Typically, this string begins a preformatted area. PREFIX = None #: A string that is added to the end of the strings #: returned by L{colorize_codeblock} and L{colorize_doctest}. #: Typically, this string ends a preformatted area. SUFFIX = None #: A list of the names of all Python keywords. ('as' is included #: even though it is technically not a keyword.) _KEYWORDS = ("and del for is raise" "assert elif from lambda return" "break else global not try" "class except if or while" "continue exec import pass yield" "def finally in print as").split() #: A list of all Python builtins. _BUILTINS = [_BI for _BI in dir(__builtins__) if not _BI.startswith('__')] #: A regexp group that matches keywords. _KEYWORD_GRP = '|'.join([r'\b%s\b' % _KW for _KW in _KEYWORDS]) #: A regexp group that matches Python builtins. _BUILTIN_GRP = (r'(?<!\.)(?:%s)' % '|'.join([r'\b%s\b' % _BI for _BI in _BUILTINS])) #: A regexp group that matches Python strings. 
_STRING_GRP = '|'.join( [r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))', r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"]) #: A regexp group that matches Python comments. _COMMENT_GRP = '(#.*?$)' #: A regexp group that matches Python ">>>" prompts. _PROMPT1_GRP = r'^[ \t]*>>>(?:[ \t]|$)' #: A regexp group that matches Python "..." prompts. _PROMPT2_GRP = r'^[ \t]*\.\.\.(?:[ \t]|$)' #: A regexp group that matches function and class definitions. _DEFINE_GRP = r'\b(?:def|class)[ \t]+\w+' #: A regexp that matches Python prompts PROMPT_RE = re.compile('(%s|%s)' % (_PROMPT1_GRP, _PROMPT2_GRP), re.MULTILINE | re.DOTALL) #: A regexp that matches Python "..." prompts. PROMPT2_RE = re.compile('(%s)' % _PROMPT2_GRP, re.MULTILINE | re.DOTALL) #: A regexp that matches doctest exception blocks. EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*', re.DOTALL | re.MULTILINE) #: A regexp that matches doctest directives. DOCTEST_DIRECTIVE_RE = re.compile(r'#[ \t]*doctest:.*') #: A regexp that matches all of the regions of a doctest block #: that should be colored. DOCTEST_RE = re.compile( r'(.*?)((?P<STRING>%s)|(?P<COMMENT>%s)|(?P<DEFINE>%s)|' r'(?P<KEYWORD>%s)|(?P<BUILTIN>%s)|' r'(?P<PROMPT1>%s)|(?P<PROMPT2>%s)|(?P<EOS>\Z))' % ( _STRING_GRP, _COMMENT_GRP, _DEFINE_GRP, _KEYWORD_GRP, _BUILTIN_GRP, _PROMPT1_GRP, _PROMPT2_GRP), re.MULTILINE | re.DOTALL) #: This regular expression is used to find doctest examples in a #: string. This is copied from the standard Python doctest.py #: module (after the refactoring in Python 2.4+). DOCTEST_EXAMPLE_RE = re.compile(r''' # Source consists of a PS1 line followed by zero or more PS2 lines. (?P<source> (?:^(?P<indent> [ ]*) >>> .*) # PS1 line (?:\n [ ]* \.\.\. .*)* # PS2 lines \n?) # Want consists of any non-blank lines that do not start with PS1. (?P<want> (?:(?![ ]*$) # Not a blank line (?![ ]*>>>) # Not a line starting with PS1 .*$\n? 
# But any other line )*) ''', re.MULTILINE | re.VERBOSE) def colorize_inline(self, s): """ Colorize a string containing Python code. Do not add the L{PREFIX} and L{SUFFIX} strings to the returned value. This method is intended for generating syntax-highlighted strings that are appropriate for inclusion as inline expressions. """ return self.DOCTEST_RE.sub(self.subfunc, s) def colorize_codeblock(self, s): """ Colorize a string containing only Python code. This method differs from L{colorize_doctest} in that it will not search for doctest prompts when deciding how to colorize the string. """ body = self.DOCTEST_RE.sub(self.subfunc, s) return self.PREFIX + body + self.SUFFIX def colorize_doctest(self, s, strip_directives=False): """ Colorize a string containing one or more doctest examples. """ output = [] charno = 0 for m in self.DOCTEST_EXAMPLE_RE.finditer(s): # Parse the doctest example: pysrc, want = m.group('source', 'want') # Pre-example text: output.append(s[charno:m.start()]) # Example source code: output.append(self.DOCTEST_RE.sub(self.subfunc, pysrc)) # Example output: if want: if self.EXCEPT_RE.match(want): output += '\n'.join([self.markup(line, 'except') for line in want.split('\n')]) else: output += '\n'.join([self.markup(line, 'output') for line in want.split('\n')]) # Update charno charno = m.end() # Add any remaining post-example text. 
output.append(s[charno:]) return self.PREFIX + ''.join(output) + self.SUFFIX def subfunc(self, match): other, text = match.group(1, 2) #print 'M %20r %20r' % (other, text) # <- for debugging if other: other = '\n'.join([self.markup(line, 'other') for line in other.split('\n')]) if match.group('PROMPT1'): return other + self.markup(text, 'prompt') elif match.group('PROMPT2'): return other + self.markup(text, 'more') elif match.group('KEYWORD'): return other + self.markup(text, 'keyword') elif match.group('BUILTIN'): return other + self.markup(text, 'builtin') elif match.group('COMMENT'): return other + self.markup(text, 'comment') elif match.group('STRING') and '\n' not in text: return other + self.markup(text, 'string') elif match.group('STRING'): # It's a multiline string; colorize the string & prompt # portion of each line. pieces = [] for line in text.split('\n'): if self.PROMPT2_RE.match(line): if len(line) > 4: pieces.append(self.markup(line[:4], 'more') + self.markup(line[4:], 'string')) else: pieces.append(self.markup(line[:4], 'more')) elif line: pieces.append(self.markup(line, 'string')) else: pieces.append('') return other + '\n'.join(pieces) elif match.group('DEFINE'): m = re.match('(?P<def>\w+)(?P<space>\s+)(?P<name>\w+)', text) return other + (self.markup(m.group('def'), 'keyword') + self.markup(m.group('space'), 'other') + self.markup(m.group('name'), 'defname')) elif match.group('EOS') is not None: return other else: assert 0, 'Unexpected match!' def markup(self, s, tag): """ Apply syntax highlighting to a single substring from a doctest block. C{s} is the substring, and C{tag} is the tag that should be applied to the substring. C{tag} will be one of the following strings: - C{prompt} -- the Python PS1 prompt (>>>) - C{more} -- the Python PS2 prompt (...) - C{keyword} -- a Python keyword (for, if, etc.) - C{builtin} -- a Python builtin name (abs, dir, etc.) 
- C{string} -- a string literal - C{comment} -- a comment - C{except} -- an exception traceback (up to the next >>>) - C{output} -- the output from a doctest block. - C{defname} -- the name of a function or class defined by a C{def} or C{class} statement. - C{other} -- anything else (does *not* include output.) """ raise AssertionError("Abstract method") class XMLDoctestColorizer(DoctestColorizer): """ A subclass of DoctestColorizer that generates XML-like output. This class is mainly intended to be used for testing purposes. """ PREFIX = '<colorized>\n' SUFFIX = '</colorized>\n' def markup(self, s, tag): s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') if tag == 'other': return s else: return '<%s>%s</%s>' % (tag, s, tag) class HTMLDoctestColorizer(DoctestColorizer): """A subclass of DoctestColorizer that generates HTML output.""" PREFIX = '<pre class="py-doctest">\n' SUFFIX = '</pre>\n' def markup(self, s, tag): if tag == 'other': return plaintext_to_html(s) else: return ('<span class="py-%s">%s</span>' % (tag, plaintext_to_html(s))) class LaTeXDoctestColorizer(DoctestColorizer): """A subclass of DoctestColorizer that generates LaTeX output.""" PREFIX = '\\begin{alltt}\n' SUFFIX = '\\end{alltt}\n' def markup(self, s, tag): if tag == 'other': return plaintext_to_latex(s) else: return '\\pysrc%s{%s}' % (tag, plaintext_to_latex(s))
apache-2.0
atopuzov/nitro-python
nssrc/com/citrix/netscaler/nitro/resource/config/cmp/cmpparameter.py
3
14320
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class cmpparameter(base_resource) : """ Configuration for CMP parameter resource. """ def __init__(self) : self._cmplevel = "" self._quantumsize = 0 self._servercmp = "" self._heurexpiry = "" self._heurexpirythres = 0 self._heurexpiryhistwt = 0 self._minressize = 0 self._cmpbypasspct = 0 self._cmponpush = "" self._policytype = "" self._addvaryheader = "" self._varyheadervalue = "" self._externalcache = "" @property def cmplevel(self) : ur"""Specify a compression level. Available settings function as follows: * Optimal - Corresponds to a gzip GZIP level of 5-7. * Best speed - Corresponds to a gzip level of 1. * Best compression - Corresponds to a gzip level of 9.<br/>Default value: optimal<br/>Possible values = optimal, bestspeed, bestcompression. """ try : return self._cmplevel except Exception as e: raise e @cmplevel.setter def cmplevel(self, cmplevel) : ur"""Specify a compression level. Available settings function as follows: * Optimal - Corresponds to a gzip GZIP level of 5-7. 
* Best speed - Corresponds to a gzip level of 1. * Best compression - Corresponds to a gzip level of 9.<br/>Default value: optimal<br/>Possible values = optimal, bestspeed, bestcompression """ try : self._cmplevel = cmplevel except Exception as e: raise e @property def quantumsize(self) : ur"""Minimum quantum of data to be filled before compression begins.<br/>Default value: 57344<br/>Minimum length = 8<br/>Maximum length = 63488. """ try : return self._quantumsize except Exception as e: raise e @quantumsize.setter def quantumsize(self, quantumsize) : ur"""Minimum quantum of data to be filled before compression begins.<br/>Default value: 57344<br/>Minimum length = 8<br/>Maximum length = 63488 """ try : self._quantumsize = quantumsize except Exception as e: raise e @property def servercmp(self) : ur"""Allow the server to send compressed data to the NetScaler appliance. With the default setting, the NetScaler appliance handles all compression.<br/>Default value: ON<br/>Possible values = ON, OFF. """ try : return self._servercmp except Exception as e: raise e @servercmp.setter def servercmp(self, servercmp) : ur"""Allow the server to send compressed data to the NetScaler appliance. With the default setting, the NetScaler appliance handles all compression.<br/>Default value: ON<br/>Possible values = ON, OFF """ try : self._servercmp = servercmp except Exception as e: raise e @property def heurexpiry(self) : ur"""Heuristic basefile expiry.<br/>Default value: OFF<br/>Possible values = ON, OFF. """ try : return self._heurexpiry except Exception as e: raise e @heurexpiry.setter def heurexpiry(self, heurexpiry) : ur"""Heuristic basefile expiry.<br/>Default value: OFF<br/>Possible values = ON, OFF """ try : self._heurexpiry = heurexpiry except Exception as e: raise e @property def heurexpirythres(self) : ur"""Threshold compression ratio for heuristic basefile expiry, multiplied by 100. 
For example, to set the threshold ratio to 1.25, specify 125.<br/>Default value: 100<br/>Minimum length = 1<br/>Maximum length = 1000. """ try : return self._heurexpirythres except Exception as e: raise e @heurexpirythres.setter def heurexpirythres(self, heurexpirythres) : ur"""Threshold compression ratio for heuristic basefile expiry, multiplied by 100. For example, to set the threshold ratio to 1.25, specify 125.<br/>Default value: 100<br/>Minimum length = 1<br/>Maximum length = 1000 """ try : self._heurexpirythres = heurexpirythres except Exception as e: raise e @property def heurexpiryhistwt(self) : ur"""For heuristic basefile expiry, weightage to be given to historical delta compression ratio, specified as percentage. For example, to give 25% weightage to historical ratio (and therefore 75% weightage to the ratio for current delta compression transaction), specify 25.<br/>Default value: 50<br/>Minimum length = 1<br/>Maximum length = 100. """ try : return self._heurexpiryhistwt except Exception as e: raise e @heurexpiryhistwt.setter def heurexpiryhistwt(self, heurexpiryhistwt) : ur"""For heuristic basefile expiry, weightage to be given to historical delta compression ratio, specified as percentage. For example, to give 25% weightage to historical ratio (and therefore 75% weightage to the ratio for current delta compression transaction), specify 25.<br/>Default value: 50<br/>Minimum length = 1<br/>Maximum length = 100 """ try : self._heurexpiryhistwt = heurexpiryhistwt except Exception as e: raise e @property def minressize(self) : ur"""Smallest response size, in bytes, to be compressed. """ try : return self._minressize except Exception as e: raise e @minressize.setter def minressize(self, minressize) : ur"""Smallest response size, in bytes, to be compressed. """ try : self._minressize = minressize except Exception as e: raise e @property def cmpbypasspct(self) : ur"""NetScaler CPU threshold after which compression is not performed. 
Range: 0 - 100.<br/>Default value: 100<br/>Maximum length = 100. """ try : return self._cmpbypasspct except Exception as e: raise e @cmpbypasspct.setter def cmpbypasspct(self, cmpbypasspct) : ur"""NetScaler CPU threshold after which compression is not performed. Range: 0 - 100.<br/>Default value: 100<br/>Maximum length = 100 """ try : self._cmpbypasspct = cmpbypasspct except Exception as e: raise e @property def cmponpush(self) : ur"""NetScaler appliance does not wait for the quantum to be filled before starting to compress data. Upon receipt of a packet with a PUSH flag, the appliance immediately begins compression of the accumulated packets.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._cmponpush except Exception as e: raise e @cmponpush.setter def cmponpush(self, cmponpush) : ur"""NetScaler appliance does not wait for the quantum to be filled before starting to compress data. Upon receipt of a packet with a PUSH flag, the appliance immediately begins compression of the accumulated packets.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED """ try : self._cmponpush = cmponpush except Exception as e: raise e @property def policytype(self) : ur"""Type of policy. Available settings function as follows: * Classic - Classic policies evaluate basic characteristics of traffic and other data. * Advanced - Advanced policies (which have been renamed as default syntax policies) can perform the same type of evaluations as classic policies. They also enable you to analyze more data (for example, the body of an HTTP request) and to configure more operations in the policy rule (for example, transforming data in the body of a request into an HTTP header).<br/>Default value: CLASSIC<br/>Possible values = CLASSIC, ADVANCED. """ try : return self._policytype except Exception as e: raise e @policytype.setter def policytype(self, policytype) : ur"""Type of policy. 
Available settings function as follows: * Classic - Classic policies evaluate basic characteristics of traffic and other data. * Advanced - Advanced policies (which have been renamed as default syntax policies) can perform the same type of evaluations as classic policies. They also enable you to analyze more data (for example, the body of an HTTP request) and to configure more operations in the policy rule (for example, transforming data in the body of a request into an HTTP header).<br/>Default value: CLASSIC<br/>Possible values = CLASSIC, ADVANCED """ try : self._policytype = policytype except Exception as e: raise e @property def addvaryheader(self) : ur"""Control insertion of the Vary header in HTTP responses compressed by NetScaler. Intermediate caches store different versions of the response for different values of the headers present in the Vary response header.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._addvaryheader except Exception as e: raise e @addvaryheader.setter def addvaryheader(self, addvaryheader) : ur"""Control insertion of the Vary header in HTTP responses compressed by NetScaler. Intermediate caches store different versions of the response for different values of the headers present in the Vary response header.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED """ try : self._addvaryheader = addvaryheader except Exception as e: raise e @property def varyheadervalue(self) : ur"""The value of the HTTP Vary header for compressed responses. If this argument is not specified, a default value of "Accept-Encoding" will be used.<br/>Minimum length = 1. """ try : return self._varyheadervalue except Exception as e: raise e @varyheadervalue.setter def varyheadervalue(self, varyheadervalue) : ur"""The value of the HTTP Vary header for compressed responses. 
If this argument is not specified, a default value of "Accept-Encoding" will be used.<br/>Minimum length = 1 """ try : self._varyheadervalue = varyheadervalue except Exception as e: raise e @property def externalcache(self) : ur"""Enable insertion of Cache-Control: private response directive to indicate response message is intended for a single user and must not be cached by a shared or proxy cache.<br/>Default value: NO<br/>Possible values = YES, NO. """ try : return self._externalcache except Exception as e: raise e @externalcache.setter def externalcache(self, externalcache) : ur"""Enable insertion of Cache-Control: private response directive to indicate response message is intended for a single user and must not be cached by a shared or proxy cache.<br/>Default value: NO<br/>Possible values = YES, NO """ try : self._externalcache = externalcache except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. """ try : result = service.payload_formatter.string_to_resource(cmpparameter_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.cmpparameter except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : return 0 except Exception as e : raise e @classmethod def update(cls, client, resource) : ur""" Use this API to update cmpparameter. 
""" try : if type(resource) is not list : updateresource = cmpparameter() updateresource.cmplevel = resource.cmplevel updateresource.quantumsize = resource.quantumsize updateresource.servercmp = resource.servercmp updateresource.heurexpiry = resource.heurexpiry updateresource.heurexpirythres = resource.heurexpirythres updateresource.heurexpiryhistwt = resource.heurexpiryhistwt updateresource.minressize = resource.minressize updateresource.cmpbypasspct = resource.cmpbypasspct updateresource.cmponpush = resource.cmponpush updateresource.policytype = resource.policytype updateresource.addvaryheader = resource.addvaryheader updateresource.varyheadervalue = resource.varyheadervalue updateresource.externalcache = resource.externalcache return updateresource.update_resource(client) except Exception as e : raise e @classmethod def unset(cls, client, resource, args) : ur""" Use this API to unset the properties of cmpparameter resource. Properties that need to be unset are specified in args array. """ try : if type(resource) is not list : unsetresource = cmpparameter() return unsetresource.unset_resource(client, args) except Exception as e : raise e @classmethod def get(cls, client, name="", option_="") : ur""" Use this API to fetch all the cmpparameter resources that are configured on netscaler. 
""" try : if not name : obj = cmpparameter() response = obj.get_resources(client, option_) return response except Exception as e : raise e class Externalcache: YES = "YES" NO = "NO" class Addvaryheader: ENABLED = "ENABLED" DISABLED = "DISABLED" class Servercmp: ON = "ON" OFF = "OFF" class Cmplevel: optimal = "optimal" bestspeed = "bestspeed" bestcompression = "bestcompression" class Policytype: CLASSIC = "CLASSIC" ADVANCED = "ADVANCED" class Cmponpush: ENABLED = "ENABLED" DISABLED = "DISABLED" class Heurexpiry: ON = "ON" OFF = "OFF" class cmpparameter_response(base_response) : def __init__(self, length=1) : self.cmpparameter = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.cmpparameter = [cmpparameter() for _ in range(length)]
apache-2.0
noxrepo/nox-classic
src/utilities/switch_command.py
16
2438
#!/usr/bin/python # # Send arbitrary command to a switch # import getopt,sys,os import httplib import simplejson import urllib # TODO: need to set the path for this from nox.webapps.webserviceclient.simple import PersistentLogin, NOXWSClient def usage(): print """ Usage: switch_command.py -d <directory name> -s <switch name> -c <command> [-u <admin username>] [-p <admin passwd>] [args] e.g. switch_command -d Built-in -s foo -c restart Note: accepts mangled switch names """ if __name__ == '__main__': sys.path.append('/opt/nox/bin') try: opts, args = getopt.getopt(sys.argv[1:], "hd:s:c:u:p:") except getopt.GetoptError, err: # print help information and exit: print str(err) # will print something like "option -a not recognized" usage() sys.exit(2) directory = None switch = None command = None adminu = "admin" adminp = "admin" for o, a in opts: if o == "-h": usage() sys.exit() elif o == '-d': directory = a elif o == '-s': switch = a try: if switch.find(';') != -1: directory = switch.split(';')[0] switch = switch.split(';')[0] except Exception, e: print 'Format error in mangled name',switch sys.exit() elif o == '-c': command = a elif o == '-u': adminu = a elif o == '-p': adminp = a else: assert False, "unhandled option" if not directory or not switch or not command: usage() sys.exit() print ' Logging into web service.. ', loginmgr = PersistentLogin("admin","admin") # currently only support localhost wsc = NOXWSClient("127.0.0.1", 443, True, loginmgr) print 'done' urlstr = '/ws.v1/switch/'+directory+'/'+switch+'/command' print ' Issuing:' print '\t',urlstr url = urllib.quote(urlstr) d = {} d['command'] = command d['args'] = args headers = {} headers["content-type"] = "application/json" response = wsc.put(url, headers, simplejson.dumps(d)) body = response.getBody() if body == '0': print 'Command sent succesfully' else: print 'Error: ',body
gpl-3.0
piquadrat/django
django/middleware/csrf.py
11
13363
""" Cross Site Request Forgery Middleware. This module provides a middleware that implements protection against request forgeries from other sites. """ import logging import re import string from urllib.parse import urlparse from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.urls import get_callable from django.utils.cache import patch_vary_headers from django.utils.crypto import constant_time_compare, get_random_string from django.utils.deprecation import MiddlewareMixin from django.utils.http import is_same_domain logger = logging.getLogger('django.security.csrf') REASON_NO_REFERER = "Referer checking failed - no Referer." REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins." REASON_NO_CSRF_COOKIE = "CSRF cookie not set." REASON_BAD_TOKEN = "CSRF token missing or incorrect." REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed." REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure." CSRF_SECRET_LENGTH = 32 CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits CSRF_SESSION_KEY = '_csrftoken' def _get_failure_view(): """Return the view to be used for CSRF rejections.""" return get_callable(settings.CSRF_FAILURE_VIEW) def _get_new_csrf_string(): return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS) def _salt_cipher_secret(secret): """ Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a token by adding a salt and using it to encrypt the secret. 
""" salt = _get_new_csrf_string() chars = CSRF_ALLOWED_CHARS pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in salt)) cipher = ''.join(chars[(x + y) % len(chars)] for x, y in pairs) return salt + cipher def _unsalt_cipher_token(token): """ Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length CSRF_TOKEN_LENGTH, and that its first half is a salt), use it to decrypt the second half to produce the original secret. """ salt = token[:CSRF_SECRET_LENGTH] token = token[CSRF_SECRET_LENGTH:] chars = CSRF_ALLOWED_CHARS pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in salt)) secret = ''.join(chars[x - y] for x, y in pairs) # Note negative values are ok return secret def _get_new_csrf_token(): return _salt_cipher_secret(_get_new_csrf_string()) def get_token(request): """ Return the CSRF token required for a POST form. The token is an alphanumeric value. A new token is created if one is not already set. A side effect of calling this function is to make the csrf_protect decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie' header to the outgoing response. For this reason, you may need to use this function lazily, as is done by the csrf context processor. """ if "CSRF_COOKIE" not in request.META: csrf_secret = _get_new_csrf_string() request.META["CSRF_COOKIE"] = _salt_cipher_secret(csrf_secret) else: csrf_secret = _unsalt_cipher_token(request.META["CSRF_COOKIE"]) request.META["CSRF_COOKIE_USED"] = True return _salt_cipher_secret(csrf_secret) def rotate_token(request): """ Change the CSRF token in use for a request - should be done on login for security purposes. 
""" request.META.update({ "CSRF_COOKIE_USED": True, "CSRF_COOKIE": _get_new_csrf_token(), }) request.csrf_cookie_needs_reset = True def _sanitize_token(token): # Allow only ASCII alphanumerics if re.search('[^a-zA-Z0-9]', token): return _get_new_csrf_token() elif len(token) == CSRF_TOKEN_LENGTH: return token elif len(token) == CSRF_SECRET_LENGTH: # Older Django versions set cookies to values of CSRF_SECRET_LENGTH # alphanumeric characters. For backwards compatibility, accept # such values as unsalted secrets. # It's easier to salt here and be consistent later, rather than add # different code paths in the checks, although that might be a tad more # efficient. return _salt_cipher_secret(token) return _get_new_csrf_token() def _compare_salted_tokens(request_csrf_token, csrf_token): # Assume both arguments are sanitized -- that is, strings of # length CSRF_TOKEN_LENGTH, all CSRF_ALLOWED_CHARS. return constant_time_compare( _unsalt_cipher_token(request_csrf_token), _unsalt_cipher_token(csrf_token), ) class CsrfViewMiddleware(MiddlewareMixin): """ Require a present and correct csrfmiddlewaretoken for POST requests that have a CSRF cookie, and set an outgoing CSRF cookie. This middleware should be used in conjunction with the {% csrf_token %} template tag. """ # The _accept and _reject methods currently only exist for the sake of the # requires_csrf_token decorator. def _accept(self, request): # Avoid checking the request twice by adding a custom attribute to # request. This will be relevant when both decorator and middleware # are used. 
request.csrf_processing_done = True return None def _reject(self, request, reason): logger.warning( 'Forbidden (%s): %s', reason, request.path, extra={ 'status_code': 403, 'request': request, } ) return _get_failure_view()(request, reason=reason) def _get_token(self, request): if settings.CSRF_USE_SESSIONS: try: return request.session.get(CSRF_SESSION_KEY) except AttributeError: raise ImproperlyConfigured( 'CSRF_USE_SESSIONS is enabled, but request.session is not ' 'set. SessionMiddleware must appear before CsrfViewMiddleware ' 'in MIDDLEWARE%s.' % ('_CLASSES' if settings.MIDDLEWARE is None else '') ) else: try: cookie_token = request.COOKIES[settings.CSRF_COOKIE_NAME] except KeyError: return None csrf_token = _sanitize_token(cookie_token) if csrf_token != cookie_token: # Cookie token needed to be replaced; # the cookie needs to be reset. request.csrf_cookie_needs_reset = True return csrf_token def _set_token(self, request, response): if settings.CSRF_USE_SESSIONS: request.session[CSRF_SESSION_KEY] = request.META['CSRF_COOKIE'] else: response.set_cookie( settings.CSRF_COOKIE_NAME, request.META['CSRF_COOKIE'], max_age=settings.CSRF_COOKIE_AGE, domain=settings.CSRF_COOKIE_DOMAIN, path=settings.CSRF_COOKIE_PATH, secure=settings.CSRF_COOKIE_SECURE, httponly=settings.CSRF_COOKIE_HTTPONLY, ) # Set the Vary header since content varies with the CSRF cookie. patch_vary_headers(response, ('Cookie',)) def process_request(self, request): csrf_token = self._get_token(request) if csrf_token is not None: # Use same token next time. 
request.META['CSRF_COOKIE'] = csrf_token def process_view(self, request, callback, callback_args, callback_kwargs): if getattr(request, 'csrf_processing_done', False): return None # Wait until request.META["CSRF_COOKIE"] has been manipulated before # bailing out, so that get_token still works if getattr(callback, 'csrf_exempt', False): return None # Assume that anything not defined as 'safe' by RFC7231 needs protection if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): if getattr(request, '_dont_enforce_csrf_checks', False): # Mechanism to turn off CSRF checks for test suite. # It comes after the creation of CSRF cookies, so that # everything else continues to work exactly the same # (e.g. cookies are sent, etc.), but before any # branches that call reject(). return self._accept(request) if request.is_secure(): # Suppose user visits http://example.com/ # An active network attacker (man-in-the-middle, MITM) sends a # POST form that targets https://example.com/detonate-bomb/ and # submits it via JavaScript. # # The attacker will need to provide a CSRF cookie and token, but # that's no problem for a MITM and the session-independent # secret we're using. So the MITM can circumvent the CSRF # protection. This is true for any HTTP connection, but anyone # using HTTPS expects better! For this reason, for # https://example.com/ we need additional protection that treats # http://example.com/ as completely untrusted. Under HTTPS, # Barth et al. found that the Referer header is missing for # same-domain requests in only about 0.2% of cases or less, so # we can use strict Referer checking. referer = request.META.get('HTTP_REFERER') if referer is None: return self._reject(request, REASON_NO_REFERER) referer = urlparse(referer) # Make sure we have a valid URL for Referer. if '' in (referer.scheme, referer.netloc): return self._reject(request, REASON_MALFORMED_REFERER) # Ensure that our Referer is also secure. 
if referer.scheme != 'https': return self._reject(request, REASON_INSECURE_REFERER) # If there isn't a CSRF_COOKIE_DOMAIN, require an exact match # match on host:port. If not, obey the cookie rules (or those # for the session cookie, if CSRF_USE_SESSIONS). good_referer = ( settings.SESSION_COOKIE_DOMAIN if settings.CSRF_USE_SESSIONS else settings.CSRF_COOKIE_DOMAIN ) if good_referer is not None: server_port = request.get_port() if server_port not in ('443', '80'): good_referer = '%s:%s' % (good_referer, server_port) else: # request.get_host() includes the port. good_referer = request.get_host() # Here we generate a list of all acceptable HTTP referers, # including the current host since that has been validated # upstream. good_hosts = list(settings.CSRF_TRUSTED_ORIGINS) good_hosts.append(good_referer) if not any(is_same_domain(referer.netloc, host) for host in good_hosts): reason = REASON_BAD_REFERER % referer.geturl() return self._reject(request, reason) csrf_token = request.META.get('CSRF_COOKIE') if csrf_token is None: # No CSRF cookie. For POST requests, we insist on a CSRF cookie, # and in this way we can avoid all CSRF attacks, including login # CSRF. return self._reject(request, REASON_NO_CSRF_COOKIE) # Check non-cookie token for match. request_csrf_token = "" if request.method == "POST": try: request_csrf_token = request.POST.get('csrfmiddlewaretoken', '') except IOError: # Handle a broken connection before we've completed reading # the POST data. process_view shouldn't raise any # exceptions, so we'll ignore and serve the user a 403 # (assuming they're still listening, which they probably # aren't because of the error). pass if request_csrf_token == "": # Fall back to X-CSRFToken, to make things easier for AJAX, # and possible for PUT/DELETE. 
request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '') request_csrf_token = _sanitize_token(request_csrf_token) if not _compare_salted_tokens(request_csrf_token, csrf_token): return self._reject(request, REASON_BAD_TOKEN) return self._accept(request) def process_response(self, request, response): if not getattr(request, 'csrf_cookie_needs_reset', False): if getattr(response, 'csrf_cookie_set', False): return response if not request.META.get("CSRF_COOKIE_USED", False): return response # Set the CSRF cookie even if it's already set, so we renew # the expiry timer. self._set_token(request, response) response.csrf_cookie_set = True return response
bsd-3-clause
Hao-Liu/avocado-vt
virttest/staging/utils_cgroup.py
12
26346
#!/usr/bin/python # -*- coding: utf-8 -*- """ Helpers for cgroup testing. :copyright: 2011 Red Hat Inc. :author: Lukas Doktor <ldoktor@redhat.com> """ import logging import os import shutil import subprocess import time import re import random import commands from tempfile import mkdtemp from avocado.core import exceptions from avocado.utils import software_manager from avocado.utils import process from . import service class Cgroup(object): """ Cgroup handling class. """ def __init__(self, module, _client): """ Constructor :param module: Name of the cgroup module :param _client: Test script pwd + name """ self.module = module self._client = _client self.root = None self.cgroups = [] def __del__(self): """ Destructor """ self.cgroups.sort(reverse=True) for pwd in self.cgroups[:]: for task in self.get_property("tasks", pwd): if task: self.set_root_cgroup(int(task)) self.rm_cgroup(pwd) def initialize(self, modules): """ Initializes object for use. :param modules: Array of all available cgroup modules. 
""" self.root = modules.get_pwd(self.module) if not self.root: raise exceptions.TestError("cg.initialize(): Module %s not found" % self.module) def __get_cgroup_pwd(self, cgroup): """ Get cgroup's full path :param cgroup: cgroup name :return: cgroup's full path """ if not isinstance(cgroup, str): raise exceptions.TestError("cgroup type isn't string!") return os.path.join(self.root, cgroup) + '/' def get_cgroup_name(self, pwd=None): """ Get cgroup's name :param pwd: cgroup name :return: cgroup's name """ if pwd is None: # root cgroup return None if isinstance(pwd, int): pwd = self.cgroups[pwd] # self.root is "/cgroup/blkio," not "/cgroup/blkio/" # cgroup is "/cgroup/blkio/test" or "/cgroup/blkio/test/test" # expected cgroup name is test or test/test if pwd.startswith(self.root + '/'): return pwd[len(self.root) + 1: -1] return None def get_cgroup_index(self, cgroup): """ Get cgroup's index in cgroups :param cgroup: cgroup name :return: index of cgroup """ try: if self.__get_cgroup_pwd(cgroup) not in self.cgroups: raise exceptions.TestFail("%s not exists!" % cgroup) cgroup_pwd = self.__get_cgroup_pwd(cgroup) return self.cgroups.index(cgroup_pwd) except process.CmdError: raise exceptions.TestFail("Find index failed!") def mk_cgroup_cgcreate(self, pwd=None, cgroup=None): """ Make a cgroup by executing the cgcreate command :params: cgroup: name of the cgroup to be created :return: last cgroup index """ try: parent_cgroup = self.get_cgroup_name(pwd) if cgroup is None: range = "abcdefghijklmnopqrstuvwxyz0123456789" sub_cgroup = "cgroup-" + "".join(random.sample(range + range.upper(), 6)) else: sub_cgroup = cgroup if parent_cgroup is None: cgroup = sub_cgroup else: # Parent cgroup:test. Created cgroup:test1. # Whole cgroup name is "test/test1" cgroup = os.path.join(parent_cgroup, sub_cgroup) if self.__get_cgroup_pwd(cgroup) in self.cgroups: raise exceptions.TestFail("%s exists!" 
% cgroup) cgcreate_cmd = "cgcreate -g %s:%s" % (self.module, cgroup) process.run(cgcreate_cmd, ignore_status=False) pwd = self.__get_cgroup_pwd(cgroup) self.cgroups.append(pwd) return len(self.cgroups) - 1 except process.CmdError: raise exceptions.TestFail("Make cgroup by cgcreate failed!") def mk_cgroup(self, pwd=None, cgroup=None): """ Creates new temporary cgroup :param pwd: where to create this cgroup (default: self.root) :param cgroup: desired cgroup name :return: last cgroup index """ if pwd is None: pwd = self.root if isinstance(pwd, int): pwd = self.cgroups[pwd] try: if cgroup and self.__get_cgroup_pwd(cgroup) in self.cgroups: raise exceptions.TestFail("%s exists!" % cgroup) if not cgroup: pwd = mkdtemp(prefix='cgroup-', dir=pwd) + '/' else: pwd = os.path.join(pwd, cgroup) + '/' if not os.path.exists(pwd): os.mkdir(pwd) except Exception, inst: raise exceptions.TestError("cg.mk_cgroup(): %s" % inst) self.cgroups.append(pwd) return len(self.cgroups) - 1 def cgexec(self, cgroup, cmd, args=""): """ Execute command in desired cgroup :param cgroup: Desired cgroup :param cmd: Executed command :param args: Executed command's parameters """ try: cgexec_cmd = ("cgexec -g %s:%s %s %s" % (self.module, cgroup, cmd, args)) status, output = commands.getstatusoutput(cgexec_cmd) return status, output except process.CmdError, detail: raise exceptions.TestFail("Execute %s in cgroup failed!\n%s" % (cmd, detail)) def rm_cgroup(self, pwd): """ Removes cgroup. :param pwd: cgroup directory. 
""" if isinstance(pwd, int): pwd = self.cgroups[pwd] try: os.rmdir(pwd) self.cgroups.remove(pwd) except ValueError: logging.warn("cg.rm_cgroup(): Removed cgroup which wasn't created" "using this Cgroup") except Exception, inst: raise exceptions.TestError("cg.rm_cgroup(): %s" % inst) def get_all_cgroups(self): """ Get all sub cgroups in this controller """ lscgroup_cmd = "lscgroup %s:/" % self.module result = process.run(lscgroup_cmd, ignore_status=True) if result.exit_status: raise exceptions.TestFail(result.stderr.strip()) cgroup_list = result.stdout.strip().splitlines() # Remove root cgroup cgroup_list = cgroup_list[1:] self.root = get_cgroup_mountpoint(self.module) sub_cgroup_list = [] for item in cgroup_list: sub_cg = item.split(":/")[-1] sub_cg_path = os.path.join(self.root, sub_cg) + '/' sub_cgroup_list.append(sub_cg_path) self.cgroups = sub_cgroup_list return self.cgroups def cgdelete_all_cgroups(self): """ Delete all cgroups in the module """ try: for cgroup_pwd in self.cgroups: # Ignore sub cgroup cgroup = self.get_cgroup_name(cgroup_pwd) if cgroup.count("/") > 0: continue self.cgdelete_cgroup(cgroup, True) except process.CmdError: raise exceptions.TestFail("cgdelete all cgroups in %s failed!" % self.module) def cgdelete_cgroup(self, cgroup, recursive=False): """ Delete desired cgroup. :params cgroup: desired cgroup :params force: If true, sub cgroup can be deleted with parent cgroup """ try: cgroup_pwd = self.__get_cgroup_pwd(cgroup) if cgroup_pwd not in self.cgroups: raise exceptions.TestError("%s doesn't exist!" 
% cgroup) cmd = "cgdelete %s:%s" % (self.module, cgroup) if recursive: cmd += " -r" process.run(cmd, ignore_status=False) self.cgroups.remove(cgroup_pwd) except process.CmdError, detail: raise exceptions.TestFail("cgdelete %s failed!\n%s" % (cgroup, detail)) def cgclassify_cgroup(self, pid, cgroup): """ Classify pid into cgroup :param pid: pid of the process :param cgroup: cgroup name """ try: cgroup_pwd = self.__get_cgroup_pwd(cgroup) if cgroup_pwd not in self.cgroups: raise exceptions.TestError("%s doesn't exist!" % cgroup) cgclassify_cmd = ("cgclassify -g %s:%s %d" % (self.module, cgroup, pid)) process.run(cgclassify_cmd, ignore_status=False) except process.CmdError, detail: raise exceptions.TestFail("Classify process to tasks file " "failed!: %s" % detail) def get_pids(self, pwd=None): """ Get all pids in cgroup :params: pwd: cgroup directory :return: all pids(list) """ if pwd is None: pwd = self.root if isinstance(pwd, int): pwd = self.cgroups[pwd] try: return [_.strip() for _ in open(os.path.join(pwd, 'tasks'), 'r')] except Exception, inst: raise exceptions.TestError("cg.get_pids(): %s" % inst) def test(self, cmd): """ Executes cgroup_client.py with cmd parameter. 
:param cmd: command to be executed :return: subprocess.Popen() process """ logging.debug("cg.test(): executing parallel process '%s'", cmd) cmd = self._client + ' ' + cmd process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) return process def is_cgroup(self, pid, pwd): """ Checks if the 'pid' process is in 'pwd' cgroup :param pid: pid of the process :param pwd: cgroup directory :return: 0 when is 'pwd' member """ if isinstance(pwd, int): pwd = self.cgroups[pwd] if open(os.path.join(pwd, 'tasks')).readlines().count("%d\n" % pid) > 0: return 0 else: return -1 def is_root_cgroup(self, pid): """ Checks if the 'pid' process is in root cgroup (WO cgroup) :param pid: pid of the process :return: 0 when is 'root' member """ return self.is_cgroup(pid, self.root) def set_cgroup(self, pid, pwd=None): """ Sets cgroup membership :param pid: pid of the process :param pwd: cgroup directory """ if pwd is None: pwd = self.root if isinstance(pwd, int): pwd = self.cgroups[pwd] try: open(os.path.join(pwd, 'tasks'), 'w').write(str(pid)) except Exception, inst: raise exceptions.TestError("cg.set_cgroup(): %s" % inst) if self.is_cgroup(pid, pwd): raise exceptions.TestError("cg.set_cgroup(): Setting %d pid into %s " "cgroup failed" % (pid, pwd)) def set_root_cgroup(self, pid): """ Resets the cgroup membership (sets to root) :param pid: pid of the process :return: 0 when PASSED """ return self.set_cgroup(pid, self.root) def get_property(self, prop, pwd=None): """ Gets the property value :param prop: property name (file) :param pwd: cgroup directory :return: [] values or None when FAILED """ if pwd is None: pwd = self.root if isinstance(pwd, int): pwd = self.cgroups[pwd] try: # Remove tailing '\n' from each line file_link = os.path.join(pwd, prop) ret = [_[:-1].replace("\t", " ") for _ in open(file_link, 'r')] if ret: return ret else: return [""] except Exception, inst: raise 
exceptions.TestError("cg.get_property(): %s" % inst) def set_property_h(self, prop, value, pwd=None, check=True, checkprop=None): """ Sets the one-line property value concerning the K,M,G postfix :param prop: property name (file) :param value: desired value :param pwd: cgroup directory :param check: check the value after setup / override checking value :param checkprop: override prop when checking the value """ _value = value try: value = str(value) human = {'B': 1, 'K': 1024, 'M': 1048576, 'G': 1073741824, 'T': 1099511627776 } if human.has_key(value[-1]): value = int(value[:-1]) * human[value[-1]] except Exception: logging.warn("cg.set_prop() fallback into cg.set_property.") value = _value self.set_property(prop, value, pwd, check, checkprop) def set_property(self, prop, value, pwd=None, check=True, checkprop=None): """ Sets the property value :param prop: property name (file) :param value: desired value :param pwd: cgroup directory :param check: check the value after setup / override checking value :param checkprop: override prop when checking the value """ value = str(value) if pwd is None: pwd = self.root if isinstance(pwd, int): pwd = self.cgroups[pwd] try: open(os.path.join(pwd, prop), 'w').write(value) except Exception, inst: raise exceptions.TestError("cg.set_property(): %s" % inst) if check is not False: if check is True: check = value if checkprop is None: checkprop = prop _values = self.get_property(checkprop, pwd) # Sanitize non printable characters before check check = " ".join(check.split()) if check not in _values: raise exceptions.TestError("cg.set_property(): Setting failed: " "desired = %s, real values = %s" % (repr(check), repr(_values))) def cgset_property(self, prop, value, pwd=None, check=True, checkprop=None): """ Sets the property value by cgset command :param prop: property name (file) :param value: desired value :param pwd: cgroup directory :param check: check the value after setup / override checking value :param checkprop: override prop 
when checking the value """ if pwd is None: pwd = self.root if isinstance(pwd, int): pwd = self.cgroups[pwd] try: cgroup = self.get_cgroup_name(pwd) cgset_cmd = "cgset -r %s='%s' %s" % (prop, value, cgroup) process.run(cgset_cmd, ignore_status=False) except process.CmdError, detail: raise exceptions.TestFail( "Modify %s failed!:\n%s" % (prop, detail)) if check is not False: if check is True: check = value if checkprop is None: checkprop = prop _values = self.get_property(checkprop, self.get_cgroup_index(cgroup)) # Sanitize non printable characters before check check = " ".join(check.split()) if check not in _values: raise exceptions.TestError("cg.set_property(): Setting failed: " "desired = %s, real values = %s" % (repr(check), repr(_values))) def smoke_test(self): """ Smoke test Module independent basic tests """ pwd = self.mk_cgroup() ps = self.test("smoke") if ps is None: raise exceptions.TestError( "cg.smoke_test: Couldn't create process") if (ps.poll() is not None): raise exceptions.TestError( "cg.smoke_test: Process died unexpectidly") # New process should be a root member if self.is_root_cgroup(ps.pid): raise exceptions.TestError( "cg.smoke_test: Process is not a root member") # Change the cgroup self.set_cgroup(ps.pid, pwd) # Try to remove used cgroup try: self.rm_cgroup(pwd) except exceptions.TestError: pass else: raise exceptions.TestError("cg.smoke_test: Unexpected successful" " deletion of the used cgroup") # Return the process into the root cgroup self.set_root_cgroup(ps.pid) # It should be safe to remove the cgroup now self.rm_cgroup(pwd) # Finish the process ps.stdin.write('\n') time.sleep(2) if (ps.poll() is None): raise exceptions.TestError( "cg.smoke_test: Process is not finished") class CgroupModules(object): """ Handles the list of different cgroup filesystems. 
""" def __init__(self, mountdir=None): self.modules = [] self.modules.append([]) self.modules.append([]) self.modules.append([]) if mountdir is None: self.mountdir = mkdtemp(prefix='cgroup-') + '/' self.rm_mountdir = True else: self.mountdir = mountdir self.rm_mountdir = False def __del__(self): """ Unmount all cgroups and remove the mountdir """ for i in range(len(self.modules[0])): if self.modules[2][i]: try: process.system('umount %s -l' % self.modules[1][i]) except Exception, failure_detail: logging.warn("CGM: Couldn't unmount %s directory: %s", self.modules[1][i], failure_detail) try: if self.rm_mountdir: # If delete /cgroup/, this action will break cgroup service. shutil.rmtree(self.mountdir) except Exception: logging.warn( "CGM: Couldn't remove the %s directory", self.mountdir) def init(self, _modules): """ Checks the mounted modules and if necessary mounts them into tmp mountdir. :param _modules: Desired modules.'memory','cpu,cpuset'... :return: Number of initialized modules. """ logging.debug("Desired cgroup modules: %s", _modules) mounts = [] proc_mounts = open('/proc/mounts', 'r') line = proc_mounts.readline().split() while line: if line[2] == 'cgroup': mounts.append(line) line = proc_mounts.readline().split() proc_mounts.close() for module in _modules: # Is it already mounted? 
i = False _module = set(module.split(',')) for mount in mounts: # 'memory' or 'memory,cpuset' if _module.issubset(mount[3].split(',')): self.modules[0].append(module) self.modules[1].append(mount[1] + '/') self.modules[2].append(False) i = True break if not i: # Not yet mounted module_path = os.path.join(self.mountdir, module) if not os.path.exists(module_path): os.mkdir(module_path) cmd = ('mount -t cgroup -o %s %s %s' % (module, module, module_path)) try: process.run(cmd) self.modules[0].append(module) self.modules[1].append(module_path) self.modules[2].append(True) except process.CmdError: logging.info("Cgroup module '%s' not available", module) logging.debug("Initialized cgroup modules: %s", self.modules[0]) return len(self.modules[0]) def get_pwd(self, module): """ Returns the mount directory of 'module' :param module: desired module (memory, ...) :return: mount directory of 'module' or None """ try: i = self.modules[0].index(module) except Exception, inst: logging.error("module %s not found: %s", module, inst) return None return self.modules[1][i] def get_load_per_cpu(_stats=None): """ Gather load per cpu from /proc/stat :param _stats: previous values :return: list of diff/absolute values of CPU times [SUM, CPU1, CPU2, ...] """ stats = [] f_stat = open('/proc/stat', 'r') if _stats: for i in range(len(_stats)): stats.append(int(f_stat.readline().split()[1]) - _stats[i]) else: line = f_stat.readline() while line: if line.startswith('cpu'): stats.append(int(line.split()[1])) else: break line = f_stat.readline() return stats def get_cgroup_mountpoint(controller, mount_file="/proc/mounts"): """ Get desired controller's mountpoint :param controller: Desired controller :param mount_file: Name of file contains mounting information, in most cases this are not need to be set. 
:return: controller's mountpoint :raise: TestError when contoller doesn't exist in mount table """ f_cgcon = open(mount_file, "rU") cgconf_txt = f_cgcon.read() f_cgcon.close() mntpt = re.findall( r"\s(\S*cgroup/\S*%s(?=[,\ ])\S*)" % controller, cgconf_txt) if len(mntpt) == 0: # Controller is not supported if not found in mount table. raise exceptions.TestError( "Doesn't support controller <%s>" % controller) return mntpt[0] def get_all_controllers(): """ Get all controllers used in system :return: all used controllers(controller_list) """ try: result = process.run("lssubsys", ignore_status=False) controllers_str = result.stdout.strip() controller_list = [] for controller in controllers_str.splitlines(): controller_sub_list = controller.split(",") controller_list += controller_sub_list except process.CmdError: controller_list = ['cpuacct', 'cpu', 'memory', 'cpuset', 'devices', 'freezer', 'blkio', 'netcls'] return controller_list def resolve_task_cgroup_path(pid, controller): """ Resolving cgroup mount path of a particular task :params: pid : process id of a task for which the cgroup path required :params: controller: takes one of the controller names in controller list :return: resolved path for cgroup controllers of a given pid """ root_path = get_cgroup_mountpoint(controller) proc_cgroup = "/proc/%d/cgroup" % pid if not os.path.isfile(proc_cgroup): raise NameError('File %s does not exist\n Check whether cgroup \ installed in the system' % proc_cgroup) try: proc_file = open(proc_cgroup, 'r') proc_cgroup_txt = proc_file.read() finally: proc_file.close() mount_path = re.findall( r":\S*%s(?=[,:])\S*:(\S*)\n" % controller, proc_cgroup_txt) return os.path.join(root_path, mount_path[0].strip("/")) class CgconfigService(object): """ Cgconfig service class. 
""" def __init__(self): # libcgroup lack libcgroup-tools dependency will introduces # following error, # Failed to issue method call: # Unit cgconfig.service failed to load: # No such file or directory # # Please refer to # https://bugzilla.redhat.com/show_bug.cgi?format=multiple&id=882887 manager = software_manager.SoftwareManager() if not manager.install('libcgroup-tools'): exceptions.TestError("Failed to install libcgroup-tools on host") self._service_manager = service.Factory.create_service("cgconfig") def _service_cgconfig_control(self, action): """ Cgconfig control by action. If cmd executes successfully, return True, otherwise return False. If the action is status, return True when it's running, otherwise return False. :param action: cgconfig service action """ if not hasattr(self._service_manager, action): raise exceptions.TestError("Unknown action: %s" % action) return getattr(self._service_manager, action)() def cgconfig_start(self): """ Sart cgconfig service """ return self._service_cgconfig_control("start") def cgconfig_stop(self): """ Sop cgconfig service """ return self._service_cgconfig_control("stop") def cgconfig_restart(self): """ Restart cgconfig service """ return self._service_cgconfig_control("restart") def cgconfig_condrestart(self): """ Condrestart cgconfig service """ return self._service_cgconfig_control("condrestart") def cgconfig_is_running(self): """ Check cgconfig service status """ return self._service_cgconfig_control("status") def all_cgroup_delete(): """ Clear all cgroups in system """ try: process.run("cgclear", ignore_status=False) except process.CmdError, detail: raise exceptions.TestFail("Clear all cgroup failed!:\n%s" % detail)
gpl-2.0
suse110/linux-1
scripts/analyze_suspend.py
1537
120394
#!/usr/bin/python # # Tool for analyzing suspend/resume timing # Copyright (c) 2013, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. # # Authors: # Todd Brandt <todd.e.brandt@linux.intel.com> # # Description: # This tool is designed to assist kernel and OS developers in optimizing # their linux stack's suspend/resume time. Using a kernel image built # with a few extra options enabled, the tool will execute a suspend and # will capture dmesg and ftrace data until resume is complete. This data # is transformed into a device timeline and a callgraph to give a quick # and detailed view of which devices and callbacks are taking the most # time in suspend/resume. The output is a single html file which can be # viewed in firefox or chrome. # # The following kernel build options are required: # CONFIG_PM_DEBUG=y # CONFIG_PM_SLEEP_DEBUG=y # CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER=y # CONFIG_FUNCTION_GRAPH_TRACER=y # # For kernel versions older than 3.15: # The following additional kernel parameters are required: # (e.g. in file /etc/default/grub) # GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..." 
# # ----------------- LIBRARIES -------------------- import sys import time import os import string import re import platform from datetime import datetime import struct # ----------------- CLASSES -------------------- # Class: SystemValues # Description: # A global, single-instance container used to # store system values and test parameters class SystemValues: version = 3.0 verbose = False testdir = '.' tpath = '/sys/kernel/debug/tracing/' fpdtpath = '/sys/firmware/acpi/tables/FPDT' epath = '/sys/kernel/debug/tracing/events/power/' traceevents = [ 'suspend_resume', 'device_pm_callback_end', 'device_pm_callback_start' ] modename = { 'freeze': 'Suspend-To-Idle (S0)', 'standby': 'Power-On Suspend (S1)', 'mem': 'Suspend-to-RAM (S3)', 'disk': 'Suspend-to-disk (S4)' } mempath = '/dev/mem' powerfile = '/sys/power/state' suspendmode = 'mem' hostname = 'localhost' prefix = 'test' teststamp = '' dmesgfile = '' ftracefile = '' htmlfile = '' rtcwake = False rtcwaketime = 10 rtcpath = '' android = False adb = 'adb' devicefilter = [] stamp = 0 execcount = 1 x2delay = 0 usecallgraph = False usetraceevents = False usetraceeventsonly = False notestrun = False altdevname = dict() postresumetime = 0 tracertypefmt = '# tracer: (?P<t>.*)' firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$' postresumefmt = '# post resume time (?P<t>[0-9]*)$' stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\ '(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\ ' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$' def __init__(self): self.hostname = platform.node() if(self.hostname == ''): self.hostname = 'localhost' rtc = "rtc0" if os.path.exists('/dev/rtc'): rtc = os.readlink('/dev/rtc') rtc = '/sys/class/rtc/'+rtc if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \ os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'): self.rtcpath = rtc def setOutputFile(self): if((self.htmlfile == '') and (self.dmesgfile != '')): m = re.match('(?P<name>.*)_dmesg\.txt$', 
self.dmesgfile) if(m): self.htmlfile = m.group('name')+'.html' if((self.htmlfile == '') and (self.ftracefile != '')): m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile) if(m): self.htmlfile = m.group('name')+'.html' if(self.htmlfile == ''): self.htmlfile = 'output.html' def initTestOutput(self, subdir): if(not self.android): self.prefix = self.hostname v = open('/proc/version', 'r').read().strip() kver = string.split(v)[2] else: self.prefix = 'android' v = os.popen(self.adb+' shell cat /proc/version').read().strip() kver = string.split(v)[2] testtime = datetime.now().strftime('suspend-%m%d%y-%H%M%S') if(subdir != "."): self.testdir = subdir+"/"+testtime else: self.testdir = testtime self.teststamp = \ '# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver self.dmesgfile = \ self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt' self.ftracefile = \ self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt' self.htmlfile = \ self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html' os.mkdir(self.testdir) def setDeviceFilter(self, devnames): self.devicefilter = string.split(devnames) def rtcWakeAlarm(self): os.system('echo 0 > '+self.rtcpath+'/wakealarm') outD = open(self.rtcpath+'/date', 'r').read().strip() outT = open(self.rtcpath+'/time', 'r').read().strip() mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD) mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT) if(mD and mT): # get the current time from hardware utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds()) dt = datetime(\ int(mD.group('y')), int(mD.group('m')), int(mD.group('d')), int(mT.group('h')), int(mT.group('m')), int(mT.group('s'))) nowtime = int(dt.strftime('%s')) + utcoffset else: # if hardware time fails, use the software time nowtime = int(datetime.now().strftime('%s')) alarm = nowtime + self.rtcwaketime os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath)) sysvals = SystemValues() # Class: DeviceNode # Description: # A 
container used to create a device hierachy, with a single root node # and a tree of child nodes. Used by Data.deviceTopology() class DeviceNode: name = '' children = 0 depth = 0 def __init__(self, nodename, nodedepth): self.name = nodename self.children = [] self.depth = nodedepth # Class: Data # Description: # The primary container for suspend/resume test data. There is one for # each test run. The data is organized into a cronological hierarchy: # Data.dmesg { # root structure, started as dmesg & ftrace, but now only ftrace # contents: times for suspend start/end, resume start/end, fwdata # phases { # 10 sequential, non-overlapping phases of S/R # contents: times for phase start/end, order/color data for html # devlist { # device callback or action list for this phase # device { # a single device callback or generic action # contents: start/stop times, pid/cpu/driver info # parents/children, html id for timeline/callgraph # optionally includes an ftrace callgraph # optionally includes intradev trace events # } # } # } # } # class Data: dmesg = {} # root data structure phases = [] # ordered list of phases start = 0.0 # test start end = 0.0 # test end tSuspended = 0.0 # low-level suspend start tResumed = 0.0 # low-level resume start tLow = 0.0 # time spent in low-level suspend (standby/freeze) fwValid = False # is firmware data available fwSuspend = 0 # time spent in firmware suspend fwResume = 0 # time spent in firmware resume dmesgtext = [] # dmesg text file in memory testnumber = 0 idstr = '' html_device_id = 0 stamp = 0 outfile = '' def __init__(self, num): idchar = 'abcdefghijklmnopqrstuvwxyz' self.testnumber = num self.idstr = idchar[num] self.dmesgtext = [] self.phases = [] self.dmesg = { # fixed list of 10 phases 'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#CCFFCC', 'order': 0}, 'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#88FF88', 'order': 1}, 'suspend_late': {'list': dict(), 'start': 
-1.0, 'end': -1.0, 'row': 0, 'color': '#00AA00', 'order': 2}, 'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#008888', 'order': 3}, 'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#0000FF', 'order': 4}, 'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FF0000', 'order': 5}, 'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FF9900', 'order': 6}, 'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FFCC00', 'order': 7}, 'resume': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FFFF88', 'order': 8}, 'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FFFFCC', 'order': 9} } self.phases = self.sortedPhases() def getStart(self): return self.dmesg[self.phases[0]]['start'] def setStart(self, time): self.start = time self.dmesg[self.phases[0]]['start'] = time def getEnd(self): return self.dmesg[self.phases[-1]]['end'] def setEnd(self, time): self.end = time self.dmesg[self.phases[-1]]['end'] = time def isTraceEventOutsideDeviceCalls(self, pid, time): for phase in self.phases: list = self.dmesg[phase]['list'] for dev in list: d = list[dev] if(d['pid'] == pid and time >= d['start'] and time <= d['end']): return False return True def addIntraDevTraceEvent(self, action, name, pid, time): if(action == 'mutex_lock_try'): color = 'red' elif(action == 'mutex_lock_pass'): color = 'green' elif(action == 'mutex_unlock'): color = 'blue' else: # create separate colors based on the name v1 = len(name)*10 % 256 v2 = string.count(name, 'e')*100 % 256 v3 = ord(name[0])*20 % 256 color = '#%06X' % ((v1*0x10000) + (v2*0x100) + v3) for phase in self.phases: list = self.dmesg[phase]['list'] for dev in list: d = list[dev] if(d['pid'] == pid and time >= d['start'] and time <= d['end']): e = TraceEvent(action, name, color, time) if('traceevents' not in d): d['traceevents'] = [] 
d['traceevents'].append(e) return d break return 0 def capIntraDevTraceEvent(self, action, name, pid, time): for phase in self.phases: list = self.dmesg[phase]['list'] for dev in list: d = list[dev] if(d['pid'] == pid and time >= d['start'] and time <= d['end']): if('traceevents' not in d): return for e in d['traceevents']: if(e.action == action and e.name == name and not e.ready): e.length = time - e.time e.ready = True break return def trimTimeVal(self, t, t0, dT, left): if left: if(t > t0): if(t - dT < t0): return t0 return t - dT else: return t else: if(t < t0 + dT): if(t > t0): return t0 + dT return t + dT else: return t def trimTime(self, t0, dT, left): self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left) self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left) self.start = self.trimTimeVal(self.start, t0, dT, left) self.end = self.trimTimeVal(self.end, t0, dT, left) for phase in self.phases: p = self.dmesg[phase] p['start'] = self.trimTimeVal(p['start'], t0, dT, left) p['end'] = self.trimTimeVal(p['end'], t0, dT, left) list = p['list'] for name in list: d = list[name] d['start'] = self.trimTimeVal(d['start'], t0, dT, left) d['end'] = self.trimTimeVal(d['end'], t0, dT, left) if('ftrace' in d): cg = d['ftrace'] cg.start = self.trimTimeVal(cg.start, t0, dT, left) cg.end = self.trimTimeVal(cg.end, t0, dT, left) for line in cg.list: line.time = self.trimTimeVal(line.time, t0, dT, left) if('traceevents' in d): for e in d['traceevents']: e.time = self.trimTimeVal(e.time, t0, dT, left) def normalizeTime(self, tZero): # first trim out any standby or freeze clock time if(self.tSuspended != self.tResumed): if(self.tResumed > tZero): self.trimTime(self.tSuspended, \ self.tResumed-self.tSuspended, True) else: self.trimTime(self.tSuspended, \ self.tResumed-self.tSuspended, False) # shift the timeline so that tZero is the new 0 self.tSuspended -= tZero self.tResumed -= tZero self.start -= tZero self.end -= tZero for phase in self.phases: p = 
self.dmesg[phase] p['start'] -= tZero p['end'] -= tZero list = p['list'] for name in list: d = list[name] d['start'] -= tZero d['end'] -= tZero if('ftrace' in d): cg = d['ftrace'] cg.start -= tZero cg.end -= tZero for line in cg.list: line.time -= tZero if('traceevents' in d): for e in d['traceevents']: e.time -= tZero def newPhaseWithSingleAction(self, phasename, devname, start, end, color): for phase in self.phases: self.dmesg[phase]['order'] += 1 self.html_device_id += 1 devid = '%s%d' % (self.idstr, self.html_device_id) list = dict() list[devname] = \ {'start': start, 'end': end, 'pid': 0, 'par': '', 'length': (end-start), 'row': 0, 'id': devid, 'drv': '' }; self.dmesg[phasename] = \ {'list': list, 'start': start, 'end': end, 'row': 0, 'color': color, 'order': 0} self.phases = self.sortedPhases() def newPhase(self, phasename, start, end, color, order): if(order < 0): order = len(self.phases) for phase in self.phases[order:]: self.dmesg[phase]['order'] += 1 if(order > 0): p = self.phases[order-1] self.dmesg[p]['end'] = start if(order < len(self.phases)): p = self.phases[order] self.dmesg[p]['start'] = end list = dict() self.dmesg[phasename] = \ {'list': list, 'start': start, 'end': end, 'row': 0, 'color': color, 'order': order} self.phases = self.sortedPhases() def setPhase(self, phase, ktime, isbegin): if(isbegin): self.dmesg[phase]['start'] = ktime else: self.dmesg[phase]['end'] = ktime def dmesgSortVal(self, phase): return self.dmesg[phase]['order'] def sortedPhases(self): return sorted(self.dmesg, key=self.dmesgSortVal) def sortedDevices(self, phase): list = self.dmesg[phase]['list'] slist = [] tmp = dict() for devname in list: dev = list[devname] tmp[dev['start']] = devname for t in sorted(tmp): slist.append(tmp[t]) return slist def fixupInitcalls(self, phase, end): # if any calls never returned, clip them at system resume end phaselist = self.dmesg[phase]['list'] for devname in phaselist: dev = phaselist[devname] if(dev['end'] < 0): dev['end'] = end 
vprint('%s (%s): callback didnt return' % (devname, phase)) def deviceFilter(self, devicefilter): # remove all by the relatives of the filter devnames filter = [] for phase in self.phases: list = self.dmesg[phase]['list'] for name in devicefilter: dev = name while(dev in list): if(dev not in filter): filter.append(dev) dev = list[dev]['par'] children = self.deviceDescendants(name, phase) for dev in children: if(dev not in filter): filter.append(dev) for phase in self.phases: list = self.dmesg[phase]['list'] rmlist = [] for name in list: pid = list[name]['pid'] if(name not in filter and pid >= 0): rmlist.append(name) for name in rmlist: del list[name] def fixupInitcallsThatDidntReturn(self): # if any calls never returned, clip them at system resume end for phase in self.phases: self.fixupInitcalls(phase, self.getEnd()) def newActionGlobal(self, name, start, end): # which phase is this device callback or action "in" targetphase = "none" overlap = 0.0 for phase in self.phases: pstart = self.dmesg[phase]['start'] pend = self.dmesg[phase]['end'] o = max(0, min(end, pend) - max(start, pstart)) if(o > overlap): targetphase = phase overlap = o if targetphase in self.phases: self.newAction(targetphase, name, -1, '', start, end, '') return True return False def newAction(self, phase, name, pid, parent, start, end, drv): # new device callback for a specific phase self.html_device_id += 1 devid = '%s%d' % (self.idstr, self.html_device_id) list = self.dmesg[phase]['list'] length = -1.0 if(start >= 0 and end >= 0): length = end - start list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv } def deviceIDs(self, devlist, phase): idlist = [] list = self.dmesg[phase]['list'] for devname in list: if devname in devlist: idlist.append(list[devname]['id']) return idlist def deviceParentID(self, devname, phase): pdev = '' pdevid = '' list = self.dmesg[phase]['list'] if devname in list: pdev = list[devname]['par'] if pdev 
in list: return list[pdev]['id'] return pdev def deviceChildren(self, devname, phase): devlist = [] list = self.dmesg[phase]['list'] for child in list: if(list[child]['par'] == devname): devlist.append(child) return devlist def deviceDescendants(self, devname, phase): children = self.deviceChildren(devname, phase) family = children for child in children: family += self.deviceDescendants(child, phase) return family def deviceChildrenIDs(self, devname, phase): devlist = self.deviceChildren(devname, phase) return self.deviceIDs(devlist, phase) def printDetails(self): vprint(' test start: %f' % self.start) for phase in self.phases: dc = len(self.dmesg[phase]['list']) vprint(' %16s: %f - %f (%d devices)' % (phase, \ self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc)) vprint(' test end: %f' % self.end) def masterTopology(self, name, list, depth): node = DeviceNode(name, depth) for cname in list: clist = self.deviceChildren(cname, 'resume') cnode = self.masterTopology(cname, clist, depth+1) node.children.append(cnode) return node def printTopology(self, node): html = '' if node.name: info = '' drv = '' for phase in self.phases: list = self.dmesg[phase]['list'] if node.name in list: s = list[node.name]['start'] e = list[node.name]['end'] if list[node.name]['drv']: drv = ' {'+list[node.name]['drv']+'}' info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000)) html += '<li><b>'+node.name+drv+'</b>' if info: html += '<ul>'+info+'</ul>' html += '</li>' if len(node.children) > 0: html += '<ul>' for cnode in node.children: html += self.printTopology(cnode) html += '</ul>' return html def rootDeviceList(self): # list of devices graphed real = [] for phase in self.dmesg: list = self.dmesg[phase]['list'] for dev in list: if list[dev]['pid'] >= 0 and dev not in real: real.append(dev) # list of top-most root devices rootlist = [] for phase in self.dmesg: list = self.dmesg[phase]['list'] for dev in list: pdev = list[dev]['par'] 
if(re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)): continue if pdev and pdev not in real and pdev not in rootlist: rootlist.append(pdev) return rootlist def deviceTopology(self): rootlist = self.rootDeviceList() master = self.masterTopology('', rootlist, 0) return self.printTopology(master) # Class: TraceEvent # Description: # A container for trace event data found in the ftrace file class TraceEvent: ready = False name = '' time = 0.0 color = '#FFFFFF' length = 0.0 action = '' def __init__(self, a, n, c, t): self.action = a self.name = n self.color = c self.time = t # Class: FTraceLine # Description: # A container for a single line of ftrace data. There are six basic types: # callgraph line: # call: " dpm_run_callback() {" # return: " }" # leaf: " dpm_run_callback();" # trace event: # tracing_mark_write: SUSPEND START or RESUME COMPLETE # suspend_resume: phase or custom exec block data # device_pm_callback: device callback info class FTraceLine: time = 0.0 length = 0.0 fcall = False freturn = False fevent = False depth = 0 name = '' type = '' def __init__(self, t, m, d): self.time = float(t) # is this a trace event if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)): if(d == 'traceevent'): # nop format trace event msg = m else: # function_graph format trace event em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m) msg = em.group('msg') emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg) if(emm): self.name = emm.group('msg') self.type = emm.group('call') else: self.name = msg self.fevent = True return # convert the duration to seconds if(d): self.length = float(d)/1000000 # the indentation determines the depth match = re.match('^(?P<d> *)(?P<o>.*)$', m) if(not match): return self.depth = self.getDepth(match.group('d')) m = match.group('o') # function return if(m[0] == '}'): self.freturn = True if(len(m) > 1): # includes comment with function name match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m) if(match): self.name = match.group('n') # 
function call else: self.fcall = True # function call with children if(m[-1] == '{'): match = re.match('^(?P<n>.*) *\(.*', m) if(match): self.name = match.group('n') # function call with no children (leaf) elif(m[-1] == ';'): self.freturn = True match = re.match('^(?P<n>.*) *\(.*', m) if(match): self.name = match.group('n') # something else (possibly a trace marker) else: self.name = m def getDepth(self, str): return len(str)/2 def debugPrint(self, dev): if(self.freturn and self.fcall): print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \ self.depth, self.name, self.length*1000000)) elif(self.freturn): print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \ self.depth, self.name, self.length*1000000)) else: print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \ self.depth, self.name, self.length*1000000)) # Class: FTraceCallGraph # Description: # A container for the ftrace callgraph of a single recursive function. # This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph # Each instance is tied to a single device in a single phase, and is # comprised of an ordered list of FTraceLine objects class FTraceCallGraph: start = -1.0 end = -1.0 list = [] invalid = False depth = 0 def __init__(self): self.start = -1.0 self.end = -1.0 self.list = [] self.depth = 0 def setDepth(self, line): if(line.fcall and not line.freturn): line.depth = self.depth self.depth += 1 elif(line.freturn and not line.fcall): self.depth -= 1 line.depth = self.depth else: line.depth = self.depth def addLine(self, line, match): if(not self.invalid): self.setDepth(line) if(line.depth == 0 and line.freturn): if(self.start < 0): self.start = line.time self.end = line.time self.list.append(line) return True if(self.invalid): return False if(len(self.list) >= 1000000 or self.depth < 0): if(len(self.list) > 0): first = self.list[0] self.list = [] self.list.append(first) self.invalid = True if(not match): return False id = 'task %s cpu %s' % (match.group('pid'), 
match.group('cpu')) window = '(%f - %f)' % (self.start, line.time) if(self.depth < 0): print('Too much data for '+id+\ ' (buffer overflow), ignoring this callback') else: print('Too much data for '+id+\ ' '+window+', ignoring this callback') return False self.list.append(line) if(self.start < 0): self.start = line.time return False def slice(self, t0, tN): minicg = FTraceCallGraph() count = -1 firstdepth = 0 for l in self.list: if(l.time < t0 or l.time > tN): continue if(count < 0): if(not l.fcall or l.name == 'dev_driver_string'): continue firstdepth = l.depth count = 0 l.depth -= firstdepth minicg.addLine(l, 0) if((count == 0 and l.freturn and l.fcall) or (count > 0 and l.depth <= 0)): break count += 1 return minicg def sanityCheck(self): stack = dict() cnt = 0 for l in self.list: if(l.fcall and not l.freturn): stack[l.depth] = l cnt += 1 elif(l.freturn and not l.fcall): if(l.depth not in stack): return False stack[l.depth].length = l.length stack[l.depth] = 0 l.length = 0 cnt -= 1 if(cnt == 0): return True return False def debugPrint(self, filename): if(filename == 'stdout'): print('[%f - %f]') % (self.start, self.end) for l in self.list: if(l.freturn and l.fcall): print('%f (%02d): %s(); (%.3f us)' % (l.time, \ l.depth, l.name, l.length*1000000)) elif(l.freturn): print('%f (%02d): %s} (%.3f us)' % (l.time, \ l.depth, l.name, l.length*1000000)) else: print('%f (%02d): %s() { (%.3f us)' % (l.time, \ l.depth, l.name, l.length*1000000)) print(' ') else: fp = open(filename, 'w') print(filename) for l in self.list: if(l.freturn and l.fcall): fp.write('%f (%02d): %s(); (%.3f us)\n' % (l.time, \ l.depth, l.name, l.length*1000000)) elif(l.freturn): fp.write('%f (%02d): %s} (%.3f us)\n' % (l.time, \ l.depth, l.name, l.length*1000000)) else: fp.write('%f (%02d): %s() { (%.3f us)\n' % (l.time, \ l.depth, l.name, l.length*1000000)) fp.close() # Class: Timeline # Description: # A container for a suspend/resume html timeline. 
In older versions # of the script there were multiple timelines, but in the latest # there is only one. class Timeline: html = {} scaleH = 0.0 # height of the row as a percent of the timeline height rowH = 0.0 # height of each row in percent of the timeline height row_height_pixels = 30 maxrows = 0 height = 0 def __init__(self): self.html = { 'timeline': '', 'legend': '', 'scale': '' } def setRows(self, rows): self.maxrows = int(rows) self.scaleH = 100.0/float(self.maxrows) self.height = self.maxrows*self.row_height_pixels r = float(self.maxrows - 1) if(r < 1.0): r = 1.0 self.rowH = (100.0 - self.scaleH)/r # Class: TestRun # Description: # A container for a suspend/resume test run. This is necessary as # there could be more than one, and they need to be separate. class TestRun: ftrace_line_fmt_fg = \ '^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\ ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\ '[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)' ftrace_line_fmt_nop = \ ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\ '(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\ '(?P<msg>.*)' ftrace_line_fmt = ftrace_line_fmt_nop cgformat = False ftemp = dict() ttemp = dict() inthepipe = False tracertype = '' data = 0 def __init__(self, dataobj): self.data = dataobj self.ftemp = dict() self.ttemp = dict() def isReady(self): if(tracertype == '' or not data): return False return True def setTracerType(self, tracer): self.tracertype = tracer if(tracer == 'function_graph'): self.cgformat = True self.ftrace_line_fmt = self.ftrace_line_fmt_fg elif(tracer == 'nop'): self.ftrace_line_fmt = self.ftrace_line_fmt_nop else: doError('Invalid tracer format: [%s]' % tracer, False) # ----------------- FUNCTIONS -------------------- # Function: vprint # Description: # verbose print (prints only with -verbose option) # Arguments: # msg: the debug/log message to print def vprint(msg): global sysvals if(sysvals.verbose): print(msg) # Function: initFtrace # Description: # Configure ftrace to use trace events 
and/or a callgraph def initFtrace(): global sysvals tp = sysvals.tpath cf = 'dpm_run_callback' if(sysvals.usetraceeventsonly): cf = '-e dpm_prepare -e dpm_complete -e dpm_run_callback' if(sysvals.usecallgraph or sysvals.usetraceevents): print('INITIALIZING FTRACE...') # turn trace off os.system('echo 0 > '+tp+'tracing_on') # set the trace clock to global os.system('echo global > '+tp+'trace_clock') # set trace buffer to a huge value os.system('echo nop > '+tp+'current_tracer') os.system('echo 100000 > '+tp+'buffer_size_kb') # initialize the callgraph trace, unless this is an x2 run if(sysvals.usecallgraph and sysvals.execcount == 1): # set trace type os.system('echo function_graph > '+tp+'current_tracer') os.system('echo "" > '+tp+'set_ftrace_filter') # set trace format options os.system('echo funcgraph-abstime > '+tp+'trace_options') os.system('echo funcgraph-proc > '+tp+'trace_options') # focus only on device suspend and resume os.system('cat '+tp+'available_filter_functions | grep '+\ cf+' > '+tp+'set_graph_function') if(sysvals.usetraceevents): # turn trace events on events = iter(sysvals.traceevents) for e in events: os.system('echo 1 > '+sysvals.epath+e+'/enable') # clear the trace buffer os.system('echo "" > '+tp+'trace') # Function: initFtraceAndroid # Description: # Configure ftrace to capture trace events def initFtraceAndroid(): global sysvals tp = sysvals.tpath if(sysvals.usetraceevents): print('INITIALIZING FTRACE...') # turn trace off os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'") # set the trace clock to global os.system(sysvals.adb+" shell 'echo global > "+tp+"trace_clock'") # set trace buffer to a huge value os.system(sysvals.adb+" shell 'echo nop > "+tp+"current_tracer'") os.system(sysvals.adb+" shell 'echo 10000 > "+tp+"buffer_size_kb'") # turn trace events on events = iter(sysvals.traceevents) for e in events: os.system(sysvals.adb+" shell 'echo 1 > "+\ sysvals.epath+e+"/enable'") # clear the trace buffer os.system(sysvals.adb+" 
shell 'echo \"\" > "+tp+"trace'") # Function: verifyFtrace # Description: # Check that ftrace is working on the system # Output: # True or False def verifyFtrace(): global sysvals # files needed for any trace data files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock', 'trace_marker', 'trace_options', 'tracing_on'] # files needed for callgraph trace data tp = sysvals.tpath if(sysvals.usecallgraph): files += [ 'available_filter_functions', 'set_ftrace_filter', 'set_graph_function' ] for f in files: if(sysvals.android): out = os.popen(sysvals.adb+' shell ls '+tp+f).read().strip() if(out != tp+f): return False else: if(os.path.exists(tp+f) == False): return False return True # Function: parseStamp # Description: # Pull in the stamp comment line from the data file(s), # create the stamp, and add it to the global sysvals object # Arguments: # m: the valid re.match output for the stamp line def parseStamp(m, data): global sysvals data.stamp = {'time': '', 'host': '', 'mode': ''} dt = datetime(int(m.group('y'))+2000, int(m.group('m')), int(m.group('d')), int(m.group('H')), int(m.group('M')), int(m.group('S'))) data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p') data.stamp['host'] = m.group('host') data.stamp['mode'] = m.group('mode') data.stamp['kernel'] = m.group('kernel') sysvals.suspendmode = data.stamp['mode'] if not sysvals.stamp: sysvals.stamp = data.stamp # Function: diffStamp # Description: # compare the host, kernel, and mode fields in 3 stamps # Arguments: # stamp1: string array with mode, kernel, and host # stamp2: string array with mode, kernel, and host # Return: # True if stamps differ, False if they're the same def diffStamp(stamp1, stamp2): if 'host' in stamp1 and 'host' in stamp2: if stamp1['host'] != stamp2['host']: return True if 'kernel' in stamp1 and 'kernel' in stamp2: if stamp1['kernel'] != stamp2['kernel']: return True if 'mode' in stamp1 and 'mode' in stamp2: if stamp1['mode'] != stamp2['mode']: return True return False # 
Function: doesTraceLogHaveTraceEvents # Description: # Quickly determine if the ftrace log has some or all of the trace events # required for primary parsing. Set the usetraceevents and/or # usetraceeventsonly flags in the global sysvals object def doesTraceLogHaveTraceEvents(): global sysvals sysvals.usetraceeventsonly = True sysvals.usetraceevents = False for e in sysvals.traceevents: out = os.popen('cat '+sysvals.ftracefile+' | grep "'+e+': "').read() if(not out): sysvals.usetraceeventsonly = False if(e == 'suspend_resume' and out): sysvals.usetraceevents = True # Function: appendIncompleteTraceLog # Description: # [deprecated for kernel 3.15 or newer] # Legacy support of ftrace outputs that lack the device_pm_callback # and/or suspend_resume trace events. The primary data should be # taken from dmesg, and this ftrace is used only for callgraph data # or custom actions in the timeline. The data is appended to the Data # objects provided. # Arguments: # testruns: the array of Data objects obtained from parseKernelLog def appendIncompleteTraceLog(testruns): global sysvals # create TestRun vessels for ftrace parsing testcnt = len(testruns) testidx = -1 testrun = [] for data in testruns: testrun.append(TestRun(data)) # extract the callgraph and traceevent data vprint('Analyzing the ftrace data...') tf = open(sysvals.ftracefile, 'r') for line in tf: # remove any latent carriage returns line = line.replace('\r\n', '') # grab the time stamp first (signifies the start of the test run) m = re.match(sysvals.stampfmt, line) if(m): testidx += 1 parseStamp(m, testrun[testidx].data) continue # pull out any firmware data if(re.match(sysvals.firmwarefmt, line)): continue # if we havent found a test time stamp yet keep spinning til we do if(testidx < 0): continue # determine the trace data type (required for further parsing) m = re.match(sysvals.tracertypefmt, line) if(m): tracer = m.group('t') testrun[testidx].setTracerType(tracer) continue # parse only valid lines, if this 
isnt one move on m = re.match(testrun[testidx].ftrace_line_fmt, line) if(not m): continue # gather the basic message data from the line m_time = m.group('time') m_pid = m.group('pid') m_msg = m.group('msg') if(testrun[testidx].cgformat): m_param3 = m.group('dur') else: m_param3 = 'traceevent' if(m_time and m_pid and m_msg): t = FTraceLine(m_time, m_msg, m_param3) pid = int(m_pid) else: continue # the line should be a call, return, or event if(not t.fcall and not t.freturn and not t.fevent): continue # only parse the ftrace data during suspend/resume data = testrun[testidx].data if(not testrun[testidx].inthepipe): # look for the suspend start marker if(t.fevent): if(t.name == 'SUSPEND START'): testrun[testidx].inthepipe = True data.setStart(t.time) continue else: # trace event processing if(t.fevent): if(t.name == 'RESUME COMPLETE'): testrun[testidx].inthepipe = False data.setEnd(t.time) if(testidx == testcnt - 1): break continue # general trace events have two types, begin and end if(re.match('(?P<name>.*) begin$', t.name)): isbegin = True elif(re.match('(?P<name>.*) end$', t.name)): isbegin = False else: continue m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name) if(m): val = m.group('val') if val == '0': name = m.group('name') else: name = m.group('name')+'['+val+']' else: m = re.match('(?P<name>.*) .*', t.name) name = m.group('name') # special processing for trace events if re.match('dpm_prepare\[.*', name): continue elif re.match('machine_suspend.*', name): continue elif re.match('suspend_enter\[.*', name): if(not isbegin): data.dmesg['suspend_prepare']['end'] = t.time continue elif re.match('dpm_suspend\[.*', name): if(not isbegin): data.dmesg['suspend']['end'] = t.time continue elif re.match('dpm_suspend_late\[.*', name): if(isbegin): data.dmesg['suspend_late']['start'] = t.time else: data.dmesg['suspend_late']['end'] = t.time continue elif re.match('dpm_suspend_noirq\[.*', name): if(isbegin): data.dmesg['suspend_noirq']['start'] = t.time else: 
data.dmesg['suspend_noirq']['end'] = t.time continue elif re.match('dpm_resume_noirq\[.*', name): if(isbegin): data.dmesg['resume_machine']['end'] = t.time data.dmesg['resume_noirq']['start'] = t.time else: data.dmesg['resume_noirq']['end'] = t.time continue elif re.match('dpm_resume_early\[.*', name): if(isbegin): data.dmesg['resume_early']['start'] = t.time else: data.dmesg['resume_early']['end'] = t.time continue elif re.match('dpm_resume\[.*', name): if(isbegin): data.dmesg['resume']['start'] = t.time else: data.dmesg['resume']['end'] = t.time continue elif re.match('dpm_complete\[.*', name): if(isbegin): data.dmesg['resume_complete']['start'] = t.time else: data.dmesg['resume_complete']['end'] = t.time continue # is this trace event outside of the devices calls if(data.isTraceEventOutsideDeviceCalls(pid, t.time)): # global events (outside device calls) are simply graphed if(isbegin): # store each trace event in ttemp if(name not in testrun[testidx].ttemp): testrun[testidx].ttemp[name] = [] testrun[testidx].ttemp[name].append(\ {'begin': t.time, 'end': t.time}) else: # finish off matching trace event in ttemp if(name in testrun[testidx].ttemp): testrun[testidx].ttemp[name][-1]['end'] = t.time else: if(isbegin): data.addIntraDevTraceEvent('', name, pid, t.time) else: data.capIntraDevTraceEvent('', name, pid, t.time) # call/return processing elif sysvals.usecallgraph: # create a callgraph object for the data if(pid not in testrun[testidx].ftemp): testrun[testidx].ftemp[pid] = [] testrun[testidx].ftemp[pid].append(FTraceCallGraph()) # when the call is finished, see which device matches it cg = testrun[testidx].ftemp[pid][-1] if(cg.addLine(t, m)): testrun[testidx].ftemp[pid].append(FTraceCallGraph()) tf.close() for test in testrun: # add the traceevent data to the device hierarchy if(sysvals.usetraceevents): for name in test.ttemp: for event in test.ttemp[name]: begin = event['begin'] end = event['end'] # if event starts before timeline start, expand timeline 
if(begin < test.data.start): test.data.setStart(begin) # if event ends after timeline end, expand the timeline if(end > test.data.end): test.data.setEnd(end) test.data.newActionGlobal(name, begin, end) # add the callgraph data to the device hierarchy for pid in test.ftemp: for cg in test.ftemp[pid]: if(not cg.sanityCheck()): id = 'task %s cpu %s' % (pid, m.group('cpu')) vprint('Sanity check failed for '+\ id+', ignoring this callback') continue callstart = cg.start callend = cg.end for p in test.data.phases: if(test.data.dmesg[p]['start'] <= callstart and callstart <= test.data.dmesg[p]['end']): list = test.data.dmesg[p]['list'] for devname in list: dev = list[devname] if(pid == dev['pid'] and callstart <= dev['start'] and callend >= dev['end']): dev['ftrace'] = cg break if(sysvals.verbose): test.data.printDetails() # add the time in between the tests as a new phase so we can see it if(len(testruns) > 1): t1e = testruns[0].getEnd() t2s = testruns[-1].getStart() testruns[-1].newPhaseWithSingleAction('user mode', \ 'user mode', t1e, t2s, '#FF9966') # Function: parseTraceLog # Description: # Analyze an ftrace log output file generated from this app during # the execution phase. 
# Used when the ftrace log is the primary data source and includes the
# suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
#    An array of Data objects
def parseTraceLog():
    """Parse sysvals.ftracefile into a list of Data objects (one per test run).

    Walks the ftrace log line by line, creating a new Data/TestRun pair at
    each stamp line, then feeds every valid ftrace line through a small state
    machine keyed on the suspend_resume / device_pm_callback trace events to
    fill in phase boundaries and per-device callback timings.  Callgraph
    (function_graph) data, if enabled, is accumulated per-pid and attached to
    the matching device afterwards.

    Returns: list of filled Data objects (testdata).
    """
    global sysvals

    vprint('Analyzing the ftrace data...')
    if(os.path.exists(sysvals.ftracefile) == False):
        doError('%s doesnt exist' % sysvals.ftracefile, False)

    # extract the callgraph and traceevent data
    testruns = []    # TestRun wrappers, parallel to testdata
    testdata = []    # Data objects, one per stamp found in the log
    testrun = 0
    data = 0
    tf = open(sysvals.ftracefile, 'r')
    phase = 'suspend_prepare'
    for line in tf:
        # remove any latent carriage returns
        line = line.replace('\r\n', '')
        # stamp line: each stamp means a new test run
        m = re.match(sysvals.stampfmt, line)
        if(m):
            data = Data(len(testdata))
            testdata.append(data)
            testrun = TestRun(data)
            testruns.append(testrun)
            parseStamp(m, data)
            continue
        # ignore everything until the first stamp is seen
        if(not data):
            continue
        # firmware line: pull out any firmware data
        m = re.match(sysvals.firmwarefmt, line)
        if(m):
            data.fwSuspend = int(m.group('s'))
            data.fwResume = int(m.group('r'))
            if(data.fwSuspend > 0 or data.fwResume > 0):
                data.fwValid = True
            continue
        # tracer type line: determine the trace data type
        m = re.match(sysvals.tracertypefmt, line)
        if(m):
            tracer = m.group('t')
            testrun.setTracerType(tracer)
            continue
        # post resume time line: did this test run include post-resume data
        m = re.match(sysvals.postresumefmt, line)
        if(m):
            t = int(m.group('t'))
            if(t > 0):
                sysvals.postresumetime = t
            continue
        # ftrace line: parse only valid lines
        m = re.match(testrun.ftrace_line_fmt, line)
        if(not m):
            continue
        # gather the basic message data from the line
        m_time = m.group('time')
        m_pid = m.group('pid')
        m_msg = m.group('msg')
        # callgraph format carries a duration field; trace events do not
        if(testrun.cgformat):
            m_param3 = m.group('dur')
        else:
            m_param3 = 'traceevent'
        if(m_time and m_pid and m_msg):
            t = FTraceLine(m_time, m_msg, m_param3)
            pid = int(m_pid)
        else:
            continue
        # the line should be a call, return, or event
        if(not t.fcall and not t.freturn and not t.fevent):
            continue
        # only parse the ftrace data during suspend/resume
        if(not testrun.inthepipe):
            # look for the suspend start marker
            if(t.fevent):
                if(t.name == 'SUSPEND START'):
                    testrun.inthepipe = True
                    data.setStart(t.time)
            continue
        # trace event processing
        if(t.fevent):
            if(t.name == 'RESUME COMPLETE'):
                # with post-resume data requested, keep collecting into a
                # synthetic 'post_resume' phase instead of closing the run
                if(sysvals.postresumetime > 0):
                    phase = 'post_resume'
                    data.newPhase(phase, t.time, t.time, '#FF9966', -1)
                else:
                    testrun.inthepipe = False
                data.setEnd(t.time)
                continue
            if(phase == 'post_resume'):
                data.setEnd(t.time)
            if(t.type == 'suspend_resume'):
                # suspend_resume trace events have two types, begin and end
                if(re.match('(?P<name>.*) begin$', t.name)):
                    isbegin = True
                elif(re.match('(?P<name>.*) end$', t.name)):
                    isbegin = False
                else:
                    continue
                # events like "suspend_enter[3]" carry a state value; fold a
                # nonzero value into the display name
                m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
                if(m):
                    val = m.group('val')
                    if val == '0':
                        name = m.group('name')
                    else:
                        name = m.group('name')+'['+val+']'
                else:
                    m = re.match('(?P<name>.*) .*', t.name)
                    name = m.group('name')
                # ignore these events
                if(re.match('acpi_suspend\[.*', t.name) or re.match('suspend_enter\[.*', name)):
                    continue
                # -- phase changes --
                # suspend_prepare start
                if(re.match('dpm_prepare\[.*', t.name)):
                    phase = 'suspend_prepare'
                    if(not isbegin):
                        data.dmesg[phase]['end'] = t.time
                    continue
                # suspend start
                elif(re.match('dpm_suspend\[.*', t.name)):
                    phase = 'suspend'
                    data.setPhase(phase, t.time, isbegin)
                    continue
                # suspend_late start
                elif(re.match('dpm_suspend_late\[.*', t.name)):
                    phase = 'suspend_late'
                    data.setPhase(phase, t.time, isbegin)
                    continue
                # suspend_noirq start
                elif(re.match('dpm_suspend_noirq\[.*', t.name)):
                    phase = 'suspend_noirq'
                    data.setPhase(phase, t.time, isbegin)
                    if(not isbegin):
                        phase = 'suspend_machine'
                        data.dmesg[phase]['start'] = t.time
                    continue
                # suspend_machine/resume_machine
                elif(re.match('machine_suspend\[.*', t.name)):
                    if(isbegin):
                        phase = 'suspend_machine'
                        data.dmesg[phase]['end'] = t.time
                        data.tSuspended = t.time
                    else:
                        # for mem/disk the machine_suspend end marks both the
                        # true suspend point and the start of resume
                        if(sysvals.suspendmode in ['mem', 'disk']):
                            data.dmesg['suspend_machine']['end'] = t.time
                            data.tSuspended = t.time
                        phase = 'resume_machine'
                        data.dmesg[phase]['start'] = t.time
                        data.tResumed = t.time
                        data.tLow = data.tResumed - data.tSuspended
                    continue
                # resume_noirq start
                elif(re.match('dpm_resume_noirq\[.*', t.name)):
                    phase = 'resume_noirq'
                    data.setPhase(phase, t.time, isbegin)
                    if(isbegin):
                        data.dmesg['resume_machine']['end'] = t.time
                    continue
                # resume_early start
                elif(re.match('dpm_resume_early\[.*', t.name)):
                    phase = 'resume_early'
                    data.setPhase(phase, t.time, isbegin)
                    continue
                # resume start
                elif(re.match('dpm_resume\[.*', t.name)):
                    phase = 'resume'
                    data.setPhase(phase, t.time, isbegin)
                    continue
                # resume complete start
                elif(re.match('dpm_complete\[.*', t.name)):
                    phase = 'resume_complete'
                    if(isbegin):
                        data.dmesg[phase]['start'] = t.time
                    continue
                # is this trace event outside of the devices calls
                if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
                    # global events (outside device calls) are simply graphed
                    if(name not in testrun.ttemp):
                        testrun.ttemp[name] = []
                    if(isbegin):
                        # create a new list entry
                        testrun.ttemp[name].append(\
                            {'begin': t.time, 'end': t.time})
                    else:
                        if(len(testrun.ttemp[name]) > 0):
                            # if an entry exists, assume this is its end
                            testrun.ttemp[name][-1]['end'] = t.time
                        elif(phase == 'post_resume'):
                            # post resume events can just have ends
                            testrun.ttemp[name].append({
                                'begin': data.dmesg[phase]['start'],
                                'end': t.time})
                else:
                    if(isbegin):
                        data.addIntraDevTraceEvent('', name, pid, t.time)
                    else:
                        data.capIntraDevTraceEvent('', name, pid, t.time)
            # device callback start
            elif(t.type == 'device_pm_callback_start'):
                m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
                    t.name);
                if(not m):
                    continue
                drv = m.group('drv')
                n = m.group('d')
                p = m.group('p')
                if(n and p):
                    data.newAction(phase, n, pid, p, t.time, -1, drv)
            # device callback finish
            elif(t.type == 'device_pm_callback_end'):
                m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
                if(not m):
                    continue
                n = m.group('d')
                list = data.dmesg[phase]['list']
                if(n in list):
                    dev = list[n]
                    dev['length'] = t.time - dev['start']
                    dev['end'] = t.time
        # callgraph processing
        elif sysvals.usecallgraph:
            # this shouldn't happen, but JIC, ignore callgraph data post-res
            if(phase == 'post_resume'):
                continue
            # create a callgraph object for the data
            if(pid not in testrun.ftemp):
                testrun.ftemp[pid] = []
                testrun.ftemp[pid].append(FTraceCallGraph())
            # when the call is finished, see which device matches it
            cg = testrun.ftemp[pid][-1]
            if(cg.addLine(t, m)):
                testrun.ftemp[pid].append(FTraceCallGraph())
    tf.close()

    for test in testruns:
        # add the traceevent data to the device hierarchy
        if(sysvals.usetraceevents):
            for name in test.ttemp:
                for event in test.ttemp[name]:
                    begin = event['begin']
                    end = event['end']
                    # if event starts before timeline start, expand timeline
                    if(begin < test.data.start):
                        test.data.setStart(begin)
                    # if event ends after timeline end, expand the timeline
                    if(end > test.data.end):
                        test.data.setEnd(end)
                    test.data.newActionGlobal(name, begin, end)
        # add the callgraph data to the device hierarchy
        borderphase = {
            'dpm_prepare': 'suspend_prepare',
            'dpm_complete': 'resume_complete'
        }
        for pid in test.ftemp:
            for cg in test.ftemp[pid]:
                if len(cg.list) < 2:
                    continue
                if(not cg.sanityCheck()):
                    # NOTE(review): 'm' here is the leftover match object from
                    # the parse loop above, so 'cpu' is the last line's cpu,
                    # not this callgraph's -- confirm intent
                    id = 'task %s cpu %s' % (pid, m.group('cpu'))
                    vprint('Sanity check failed for '+\
                        id+', ignoring this callback')
                    continue
                callstart = cg.start
                callend = cg.end
                # dpm_prepare/dpm_complete callgraphs are sliced to each
                # device's window inside their fixed border phase
                if(cg.list[0].name in borderphase):
                    p = borderphase[cg.list[0].name]
                    list = test.data.dmesg[p]['list']
                    for devname in list:
                        dev = list[devname]
                        if(pid == dev['pid'] and
                            callstart <= dev['start'] and
                            callend >= dev['end']):
                            dev['ftrace'] = cg.slice(dev['start'], dev['end'])
                    continue
                if(cg.list[0].name != 'dpm_run_callback'):
                    continue
                # find the phase and device this dpm_run_callback belongs to
                for p in test.data.phases:
                    if(test.data.dmesg[p]['start'] <= callstart and
                        callstart <= test.data.dmesg[p]['end']):
                        list = test.data.dmesg[p]['list']
                        for devname in list:
                            dev = list[devname]
                            if(pid == dev['pid'] and
                                callstart <= dev['start'] and
                                callend >= dev['end']):
                                dev['ftrace'] = cg
                        break

    # fill in any missing phases
    for data in testdata:
        lp = data.phases[0]
        for p in data.phases:
            if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
                print('WARNING: phase "%s" is missing!' % p)
            if(data.dmesg[p]['start'] < 0):
                data.dmesg[p]['start'] = data.dmesg[lp]['end']
                if(p == 'resume_machine'):
                    data.tSuspended = data.dmesg[lp]['end']
                    data.tResumed = data.dmesg[lp]['end']
                    data.tLow = 0
            if(data.dmesg[p]['end'] < 0):
                data.dmesg[p]['end'] = data.dmesg[p]['start']
            lp = p
        if(len(sysvals.devicefilter) > 0):
            data.deviceFilter(sysvals.devicefilter)
        data.fixupInitcallsThatDidntReturn()
        if(sysvals.verbose):
            data.printDetails()

    # add the time in between the tests as a new phase so we can see it
    if(len(testdata) > 1):
        t1e = testdata[0].getEnd()
        t2s = testdata[-1].getStart()
        testdata[-1].newPhaseWithSingleAction('user mode', \
            'user mode', t1e, t2s, '#FF9966')
    return testdata

# Function: loadKernelLog
# Description:
#    [deprecated for kernel 3.15.0 or newer]
#    load the dmesg file into memory and fix up any ordering issues
#    The dmesg filename is taken from sysvals
# Output:
#    An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
    """Load sysvals.dmesgfile and split it into per-test-run Data objects.

    Only populates each Data object's dmesgtext (the raw lines); the actual
    parsing is done later by parseKernelLog.  Also reorders adjacent
    call/return lines that share a timestamp so calls come first.

    Returns: list of Data objects, one per stamp found in the log.
    """
    global sysvals

    vprint('Analyzing the dmesg data...')
    if(os.path.exists(sysvals.dmesgfile) == False):
        doError('%s doesnt exist' % sysvals.dmesgfile, False)

    # there can be multiple test runs in a single file delineated by stamps
    testruns = []
    data = 0
    lf = open(sysvals.dmesgfile, 'r')
    for line in lf:
        line = line.replace('\r\n', '')
        # strip any prefix before the leading '[timestamp]'
        idx = line.find('[')
        if idx > 1:
            line = line[idx:]
        m = re.match(sysvals.stampfmt, line)
        if(m):
            if(data):
                testruns.append(data)
            data = Data(len(testruns))
            parseStamp(m, data)
            continue
        if(not data):
            continue
        m = re.match(sysvals.firmwarefmt, line)
        if(m):
            data.fwSuspend = int(m.group('s'))
            data.fwResume = int(m.group('r'))
            if(data.fwSuspend > 0 or data.fwResume > 0):
                data.fwValid = True
            continue
        m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
        if(m):
            data.dmesgtext.append(line)
            # mwait resume means the platform actually froze, not suspended
            if(re.match('ACPI: resume from mwait', m.group('msg'))):
                print('NOTE: This suspend appears to be freeze rather than'+\
                    ' %s, it will be treated as such' % sysvals.suspendmode)
                sysvals.suspendmode = 'freeze'
        else:
            vprint('ignoring dmesg line: %s' % line.replace('\n', ''))
    testruns.append(data)
    lf.close()

    if(not data):
        print('ERROR: analyze_suspend header missing from dmesg log')
        sys.exit()

    # fix lines with same timestamp/function with the call and return swapped
    for data in testruns:
        last = ''
        for line in data.dmesgtext:
            mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
                '(?P<f>.*)\+ @ .*, parent: .*', line)
            mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
                '(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
            if(mc and mr and (mc.group('t') == mr.group('t')) and
                (mc.group('f') == mr.group('f'))):
                i = data.dmesgtext.index(last)
                j = data.dmesgtext.index(line)
                data.dmesgtext[i] = line
                data.dmesgtext[j] = last
            last = line
    return testruns
# Function: parseKernelLog
# Description:
#    [deprecated for kernel 3.15.0 or newer]
#    Analyse a dmesg log output file generated from this app during
#    the execution phase. Create a set of device structures in memory
#    for subsequent formatting in the html output file
#    This call is only for legacy support on kernels where the ftrace
#    data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
#    data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
#    The filled Data object
def parseKernelLog(data):
    """Fill a Data object's phase/device info by pattern-matching dmesg text.

    Phase boundaries are detected via the 'dm' regex table (adjusted per
    suspend mode), device callbacks via initcall_debug-style call/return
    lines, and a few coarse global actions (filesystem sync, task freezing,
    CPU hotplug) via the 'at' table when trace events are unavailable.

    Returns True on completion.
    """
    global sysvals

    phase = 'suspend_runtime'

    if(data.fwValid):
        vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
            (data.fwSuspend, data.fwResume))

    # dmesg phase match table: phase name -> regex marking its boundary
    dm = {
        'suspend_prepare': 'PM: Syncing filesystems.*',
        'suspend': 'PM: Entering [a-z]* sleep.*',
        'suspend_late': 'PM: suspend of devices complete after.*',
        'suspend_noirq': 'PM: late suspend of devices complete after.*',
        'suspend_machine': 'PM: noirq suspend of devices complete after.*',
        'resume_machine': 'ACPI: Low-level resume complete.*',
        'resume_noirq': 'ACPI: Waking up from system sleep state.*',
        'resume_early': 'PM: noirq resume of devices complete after.*',
        'resume': 'PM: early resume of devices complete after.*',
        'resume_complete': 'PM: resume of devices complete after.*',
        'post_resume': '.*Restarting tasks \.\.\..*',
    }
    # standby/disk/freeze print different messages; patch the table
    if(sysvals.suspendmode == 'standby'):
        dm['resume_machine'] = 'PM: Restoring platform NVS memory'
    elif(sysvals.suspendmode == 'disk'):
        dm['suspend_late'] = 'PM: freeze of devices complete after.*'
        dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
        dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
        dm['resume_machine'] = 'PM: Restoring platform NVS memory'
        dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
        dm['resume'] = 'PM: early restore of devices complete after.*'
        dm['resume_complete'] = 'PM: restore of devices complete after.*'
    elif(sysvals.suspendmode == 'freeze'):
        dm['resume_machine'] = 'ACPI: resume from mwait'

    # action table (expected events that occur and show up in dmesg)
    # smsg = start-of-action regex, emsg = end-of-action regex
    at = {
        'sync_filesystems': {
            'smsg': 'PM: Syncing filesystems.*',
            'emsg': 'PM: Preparing system for mem sleep.*' },
        'freeze_user_processes': {
            'smsg': 'Freezing user space processes .*',
            'emsg': 'Freezing remaining freezable tasks.*' },
        'freeze_tasks': {
            'smsg': 'Freezing remaining freezable tasks.*',
            'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
        'ACPI prepare': {
            'smsg': 'ACPI: Preparing to enter system sleep state.*',
            'emsg': 'PM: Saving platform NVS memory.*' },
        'PM vns': {
            'smsg': 'PM: Saving platform NVS memory.*',
            'emsg': 'Disabling non-boot CPUs .*' },
    }

    t0 = -1.0
    cpu_start = -1.0
    prevktime = -1.0
    actions = dict()
    for line in data.dmesgtext:
        # -- preprocessing --
        # parse each dmesg line into the time and message
        m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
        if(m):
            val = m.group('ktime')
            try:
                ktime = float(val)
            except:
                doWarning('INVALID DMESG LINE: '+\
                    line.replace('\n', ''), 'dmesg')
                continue
            msg = m.group('msg')
            # initialize data start to first line time
            if t0 < 0:
                data.setStart(ktime)
                t0 = ktime
        else:
            continue

        # hack for determining resume_machine end for freeze
        # NOTE(review): initcall_debug dmesg lines normally have two spaces
        # after 'calling'; the pattern here shows one -- confirm against a
        # real log before relying on this match
        if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
            and phase == 'resume_machine' and \
            re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
            data.dmesg['resume_machine']['end'] = ktime
            phase = 'resume_noirq'
            data.dmesg[phase]['start'] = ktime

        # -- phase changes --
        # suspend start
        if(re.match(dm['suspend_prepare'], msg)):
            phase = 'suspend_prepare'
            data.dmesg[phase]['start'] = ktime
            data.setStart(ktime)
        # suspend start
        elif(re.match(dm['suspend'], msg)):
            data.dmesg['suspend_prepare']['end'] = ktime
            phase = 'suspend'
            data.dmesg[phase]['start'] = ktime
        # suspend_late start
        elif(re.match(dm['suspend_late'], msg)):
            data.dmesg['suspend']['end'] = ktime
            phase = 'suspend_late'
            data.dmesg[phase]['start'] = ktime
        # suspend_noirq start
        elif(re.match(dm['suspend_noirq'], msg)):
            data.dmesg['suspend_late']['end'] = ktime
            phase = 'suspend_noirq'
            data.dmesg[phase]['start'] = ktime
        # suspend_machine start
        elif(re.match(dm['suspend_machine'], msg)):
            data.dmesg['suspend_noirq']['end'] = ktime
            phase = 'suspend_machine'
            data.dmesg[phase]['start'] = ktime
        # resume_machine start
        elif(re.match(dm['resume_machine'], msg)):
            # freeze/standby resume markers appear after resume has begun,
            # so the previous line's time is the true suspend point
            if(sysvals.suspendmode in ['freeze', 'standby']):
                data.tSuspended = prevktime
                data.dmesg['suspend_machine']['end'] = prevktime
            else:
                data.tSuspended = ktime
                data.dmesg['suspend_machine']['end'] = ktime
            phase = 'resume_machine'
            data.tResumed = ktime
            data.tLow = data.tResumed - data.tSuspended
            data.dmesg[phase]['start'] = ktime
        # resume_noirq start
        elif(re.match(dm['resume_noirq'], msg)):
            data.dmesg['resume_machine']['end'] = ktime
            phase = 'resume_noirq'
            data.dmesg[phase]['start'] = ktime
        # resume_early start
        elif(re.match(dm['resume_early'], msg)):
            data.dmesg['resume_noirq']['end'] = ktime
            phase = 'resume_early'
            data.dmesg[phase]['start'] = ktime
        # resume start
        elif(re.match(dm['resume'], msg)):
            data.dmesg['resume_early']['end'] = ktime
            phase = 'resume'
            data.dmesg[phase]['start'] = ktime
        # resume complete start
        elif(re.match(dm['resume_complete'], msg)):
            data.dmesg['resume']['end'] = ktime
            phase = 'resume_complete'
            data.dmesg[phase]['start'] = ktime
        # post resume start
        elif(re.match(dm['post_resume'], msg)):
            data.dmesg['resume_complete']['end'] = ktime
            data.setEnd(ktime)
            phase = 'post_resume'
            break

        # -- device callbacks --
        if(phase in data.phases):
            # device init call
            if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
                sm = re.match('calling (?P<f>.*)\+ @ '+\
                    '(?P<n>.*), parent: (?P<p>.*)', msg);
                f = sm.group('f')
                n = sm.group('n')
                p = sm.group('p')
                if(f and n and p):
                    data.newAction(phase, f, int(n), p, ktime, -1, '')
            # device init return
            elif(re.match('call (?P<f>.*)\+ returned .* after '+\
                '(?P<t>.*) usecs', msg)):
                sm = re.match('call (?P<f>.*)\+ returned .* after '+\
                    '(?P<t>.*) usecs(?P<a>.*)', msg);
                f = sm.group('f')
                t = sm.group('t')
                list = data.dmesg[phase]['list']
                if(f in list):
                    dev = list[f]
                    dev['length'] = int(t)
                    dev['end'] = ktime

        # -- non-devicecallback actions --
        # if trace events are not available, these are better than nothing
        if(not sysvals.usetraceevents):
            # look for known actions
            for a in at:
                if(re.match(at[a]['smsg'], msg)):
                    if(a not in actions):
                        actions[a] = []
                    actions[a].append({'begin': ktime, 'end': ktime})
                if(re.match(at[a]['emsg'], msg)):
                    # NOTE(review): assumes the matching 'smsg' was already
                    # seen; an emsg-first log would KeyError here
                    actions[a][-1]['end'] = ktime
            # now look for CPU on/off events
            if(re.match('Disabling non-boot CPUs .*', msg)):
                # start of first cpu suspend
                cpu_start = ktime
            elif(re.match('Enabling non-boot CPUs .*', msg)):
                # start of first cpu resume
                cpu_start = ktime
            elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
                # end of a cpu suspend, start of the next
                m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
                cpu = 'CPU'+m.group('cpu')
                if(cpu not in actions):
                    actions[cpu] = []
                actions[cpu].append({'begin': cpu_start, 'end': ktime})
                cpu_start = ktime
            elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
                # end of a cpu resume, start of the next
                m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
                cpu = 'CPU'+m.group('cpu')
                if(cpu not in actions):
                    actions[cpu] = []
                actions[cpu].append({'begin': cpu_start, 'end': ktime})
                cpu_start = ktime
        prevktime = ktime

    # fill in any missing phases
    lp = data.phases[0]
    for p in data.phases:
        if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
            print('WARNING: phase "%s" is missing, something went wrong!' % p)
            print(' In %s, this dmesg line denotes the start of %s:' % \
                (sysvals.suspendmode, p))
            print(' "%s"' % dm[p])
        if(data.dmesg[p]['start'] < 0):
            data.dmesg[p]['start'] = data.dmesg[lp]['end']
            if(p == 'resume_machine'):
                data.tSuspended = data.dmesg[lp]['end']
                data.tResumed = data.dmesg[lp]['end']
                data.tLow = 0
        if(data.dmesg[p]['end'] < 0):
            data.dmesg[p]['end'] = data.dmesg[p]['start']
        lp = p

    # fill in any actions we've found
    for name in actions:
        for event in actions[name]:
            begin = event['begin']
            end = event['end']
            # if event starts before timeline start, expand timeline
            if(begin < data.start):
                data.setStart(begin)
            # if event ends after timeline end, expand the timeline
            if(end > data.end):
                data.setEnd(end)
            data.newActionGlobal(name, begin, end)

    if(sysvals.verbose):
        data.printDetails()
    if(len(sysvals.devicefilter) > 0):
        data.deviceFilter(sysvals.devicefilter)
    data.fixupInitcallsThatDidntReturn()
    return True

# Function: setTimelineRows
# Description:
#    Organize the timeline entries into the smallest
#    number of rows possible, with no entry overlapping
# Arguments:
#    list: the list of devices/actions for a single phase
#    sortedkeys: chronologically sorted key list to use
# Output:
#    The total number of rows needed to display this phase of the timeline
def setTimelineRows(list, sortedkeys):
    """Greedy interval packing: assign each entry a 'row' with no overlaps.

    NOTE(review): the parameter 'list' shadows the builtin, and the local
    're' below shadows the regex module (both are function-local only, so
    behavior is unaffected here).
    """
    # clear all rows and set them to undefined
    remaining = len(list)
    rowdata = dict()
    row = 0
    for item in list:
        list[item]['row'] = -1
    # try to pack each row with as many ranges as possible
    while(remaining > 0):
        if(row not in rowdata):
            rowdata[row] = []
        for item in sortedkeys:
            if(list[item]['row'] < 0):
                s = list[item]['start']
                e = list[item]['end']
                valid = True
                # the candidate fits only if it is entirely left or entirely
                # right of every range already placed in this row
                for ritem in rowdata[row]:
                    rs = ritem['start']
                    re = ritem['end']
                    if(not (((s <= rs) and (e <= rs)) or
                        ((s >= re) and (e >= re)))):
                        valid = False
                        break
                if(valid):
                    rowdata[row].append(list[item])
                    list[item]['row'] = row
                    remaining -= 1
        row += 1
    return row
# Function: createTimeScale
# Description:
#    Create the timescale header for the html timeline
# Arguments:
#    t0: start time (suspend begin)
#    tMax: end time (resume end)
#    tSuspended: time when suspend occurs, i.e. the zero time
# Output:
#    The html code needed to display the time scale
def createTimeScale(t0, tMax, tSuspended):
    """Build the html tick-mark ruler, centered on the suspend point.

    Tick spacing is 0.1s, or 1s when the timeline exceeds 4 seconds.
    If tSuspended < 0 the scale is drawn without an S/R marker.
    """
    timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
    output = '<div id="timescale">\n'

    # set scale for timeline
    tTotal = tMax - t0
    tS = 0.1
    if(tTotal <= 0):
        return output
    if(tTotal > 4):
        tS = 1
    if(tSuspended < 0):
        for i in range(int(tTotal/tS)+1):
            pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal))
            if(i > 0):
                val = '%0.fms' % (float(i)*tS*1000)
            else:
                val = ''
            output += timescale.format(pos, val)
    else:
        # s0 shifts the ticks so one lands exactly on the suspend point
        tSuspend = tSuspended - t0
        divTotal = int(tTotal/tS) + 1
        divSuspend = int(tSuspend/tS)
        s0 = (tSuspend - tS*divSuspend)*100/tTotal
        for i in range(divTotal):
            pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal) - s0)
            if((i == 0) and (s0 < 3)):
                val = ''
            elif(i == divSuspend):
                val = 'S/R'
            else:
                val = '%0.fms' % (float(i-divSuspend)*tS*1000)
            output += timescale.format(pos, val)
    output += '</div>\n'
    return output

# Function: createHTMLSummarySimple
# Description:
#    Create summary html file for a series of tests
# Arguments:
#    testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile):
    """Write a one-table html summary (one row per test run) to htmlfile.

    Per-test host/kernel/mode columns are only emitted when the stamps
    differ between runs (stampcolumns).  The final row holds the averages.
    """
    global sysvals

    # print out the basic summary of all the tests
    hf = open(htmlfile, 'w')

    # write the html header first (html head, css code, up to body start)
    html = '<!DOCTYPE html>\n<html>\n<head>\n\
    <meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
    <title>AnalyzeSuspend Summary</title>\n\
    <style type=\'text/css\'>\n\
    body {overflow-y: scroll;}\n\
    .stamp {width: 100%;text-align:center;background-color:#495E09;line-height:30px;color:white;font: 25px Arial;}\n\
    table {width:100%;border-collapse: collapse;}\n\
    .summary {font: 22px Arial;border:1px solid;}\n\
    th {border: 1px solid black;background-color:#A7C942;color:white;}\n\
    td {text-align: center;}\n\
    tr.alt td {background-color:#EAF2D3;}\n\
    tr.avg td {background-color:#BDE34C;}\n\
    a:link {color: #90B521;}\n\
    a:visited {color: #495E09;}\n\
    a:hover {color: #B1DF28;}\n\
    a:active {color: #FFFFFF;}\n\
    </style>\n</head>\n<body>\n'

    # group test header
    count = len(testruns)
    headline_stamp = '<div class="stamp">{0} {1} {2} {3} ({4} tests)</div>\n'
    html += headline_stamp.format(sysvals.stamp['host'],
        sysvals.stamp['kernel'], sysvals.stamp['mode'],
        sysvals.stamp['time'], count)

    # check to see if all the tests have the same value
    stampcolumns = False
    for data in testruns:
        if diffStamp(sysvals.stamp, data.stamp):
            stampcolumns = True
            break

    th = '\t<th>{0}</th>\n'
    td = '\t<td>{0}</td>\n'
    tdlink = '\t<td><a href="{0}">Click Here</a></td>\n'

    # table header
    html += '<table class="summary">\n<tr>\n'
    html += th.format("Test #")
    if stampcolumns:
        html += th.format("Hostname")
        html += th.format("Kernel Version")
        html += th.format("Suspend Mode")
    html += th.format("Test Time")
    html += th.format("Suspend Time")
    html += th.format("Resume Time")
    html += th.format("Detail")
    html += '</tr>\n'

    # test data, 1 row per test
    sTimeAvg = 0.0
    rTimeAvg = 0.0
    num = 1
    for data in testruns:
        # data.end is the end of post_resume
        resumeEnd = data.dmesg['resume_complete']['end']
        if num % 2 == 1:
            html += '<tr class="alt">\n'
        else:
            html += '<tr>\n'
        # test num
        html += td.format("test %d" % num)
        num += 1
        if stampcolumns:
            # host name
            val = "unknown"
            if('host' in data.stamp):
                val = data.stamp['host']
            html += td.format(val)
            # host kernel
            val = "unknown"
            if('kernel' in data.stamp):
                val = data.stamp['kernel']
            html += td.format(val)
            # suspend mode
            val = "unknown"
            if('mode' in data.stamp):
                val = data.stamp['mode']
            html += td.format(val)
        # test time
        val = "unknown"
        if('time' in data.stamp):
            val = data.stamp['time']
        html += td.format(val)
        # suspend time
        sTime = (data.tSuspended - data.start)*1000
        sTimeAvg += sTime
        html += td.format("%3.3f ms" % sTime)
        # resume time
        rTime = (resumeEnd - data.tResumed)*1000
        rTimeAvg += rTime
        html += td.format("%3.3f ms" % rTime)
        # link to the output html
        html += tdlink.format(data.outfile)
        html += '</tr>\n'

    # last line: test average
    if(count > 0):
        sTimeAvg /= count
        rTimeAvg /= count
    html += '<tr class="avg">\n'
    html += td.format('Average')    # name
    if stampcolumns:
        html += td.format('')       # host
        html += td.format('')       # kernel
        html += td.format('')       # mode
    html += td.format('')           # time
    html += td.format("%3.3f ms" % sTimeAvg)    # suspend time
    html += td.format("%3.3f ms" % rTimeAvg)    # resume time
    html += td.format('')           # output link
    html += '</tr>\n'

    # flush the data to file
    hf.write(html+'</table>\n')
    hf.write('</body>\n</html>\n')
    hf.close()

# Function: createHTML # Description: # Create the output html file from the resident test data # Arguments: # testruns: array of Data objects from parseKernelLog or parseTraceLog # Output: # True if the html file was created, false if it failed def createHTML(testruns): global sysvals for data in testruns: data.normalizeTime(testruns[-1].tSuspended) x2changes = ['', 'absolute'] if len(testruns) > 1: x2changes = ['1', 'relative'] # html function templates headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n' html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0] html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n' html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n' html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n' html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n' html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}%;height:{3}%;width:{4}%;border:1px solid {5};background-color:{5}">{6}</div>\n' html_phase = '<div class="phase"
style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n' html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n' html_legend = '<div class="square" style="left:{0}%;background-color:{1}">&nbsp;{2}</div>\n' html_timetotal = '<table class="time1">\n<tr>'\ '<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\ '<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\ '</tr>\n</table>\n' html_timetotal2 = '<table class="time1">\n<tr>'\ '<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\ '<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\ '<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\ '</tr>\n</table>\n' html_timegroups = '<table class="time2">\n<tr>'\ '<td class="green">{4}Kernel Suspend: {0} ms</td>'\ '<td class="purple">{4}Firmware Suspend: {1} ms</td>'\ '<td class="purple">{4}Firmware Resume: {2} ms</td>'\ '<td class="yellow">{4}Kernel Resume: {3} ms</td>'\ '</tr>\n</table>\n' # device timeline vprint('Creating Device Timeline...') devtl = Timeline() # Generate the header for this timeline textnum = ['First', 'Second'] for data in testruns: tTotal = data.end - data.start tEnd = data.dmesg['resume_complete']['end'] if(tTotal == 0): print('ERROR: No timeline data') sys.exit() if(data.tLow > 0): low_time = '%.0f'%(data.tLow*1000) if data.fwValid: suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \ (data.fwSuspend/1000000.0)) resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \ (data.fwResume/1000000.0)) testdesc1 = 'Total' testdesc2 = '' if(len(testruns) > 1): testdesc1 = testdesc2 = textnum[data.testnumber] testdesc2 += ' ' if(data.tLow == 0): thtml = html_timetotal.format(suspend_time, \ resume_time, testdesc1) else: thtml = html_timetotal2.format(suspend_time, low_time, \ resume_time, testdesc1) devtl.html['timeline'] += thtml sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \ data.getStart())*1000) sftime = '%.3f'%(data.fwSuspend / 
1000000.0) rftime = '%.3f'%(data.fwResume / 1000000.0) rktime = '%.3f'%((data.getEnd() - \ data.dmesg['resume_machine']['start'])*1000) devtl.html['timeline'] += html_timegroups.format(sktime, \ sftime, rftime, rktime, testdesc2) else: suspend_time = '%.0f'%((data.tSuspended-data.start)*1000) resume_time = '%.0f'%((tEnd-data.tSuspended)*1000) testdesc = 'Kernel' if(len(testruns) > 1): testdesc = textnum[data.testnumber]+' '+testdesc if(data.tLow == 0): thtml = html_timetotal.format(suspend_time, \ resume_time, testdesc) else: thtml = html_timetotal2.format(suspend_time, low_time, \ resume_time, testdesc) devtl.html['timeline'] += thtml # time scale for potentially multiple datasets t0 = testruns[0].start tMax = testruns[-1].end tSuspended = testruns[-1].tSuspended tTotal = tMax - t0 # determine the maximum number of rows we need to draw timelinerows = 0 for data in testruns: for phase in data.dmesg: list = data.dmesg[phase]['list'] rows = setTimelineRows(list, list) data.dmesg[phase]['row'] = rows if(rows > timelinerows): timelinerows = rows # calculate the timeline height and create bounding box, add buttons devtl.setRows(timelinerows + 1) devtl.html['timeline'] += html_devlist1 if len(testruns) > 1: devtl.html['timeline'] += html_devlist2 devtl.html['timeline'] += html_zoombox devtl.html['timeline'] += html_timeline.format('dmesg', devtl.height) # draw the colored boxes for each of the phases for data in testruns: for b in data.dmesg: phase = data.dmesg[b] length = phase['end']-phase['start'] left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal) width = '%.3f' % ((length*100.0)/tTotal) devtl.html['timeline'] += html_phase.format(left, width, \ '%.3f'%devtl.scaleH, '%.3f'%(100-devtl.scaleH), \ data.dmesg[b]['color'], '') # draw the time scale, try to make the number of labels readable devtl.html['scale'] = createTimeScale(t0, tMax, tSuspended) devtl.html['timeline'] += devtl.html['scale'] for data in testruns: for b in data.dmesg: phaselist = data.dmesg[b]['list'] 
for d in phaselist: name = d drv = '' dev = phaselist[d] if(d in sysvals.altdevname): name = sysvals.altdevname[d] if('drv' in dev and dev['drv']): drv = ' {%s}' % dev['drv'] height = (100.0 - devtl.scaleH)/data.dmesg[b]['row'] top = '%.3f' % ((dev['row']*height) + devtl.scaleH) left = '%.3f' % (((dev['start']-t0)*100)/tTotal) width = '%.3f' % (((dev['end']-dev['start'])*100)/tTotal) length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000) color = 'rgba(204,204,204,0.5)' devtl.html['timeline'] += html_device.format(dev['id'], \ d+drv+length+b, left, top, '%.3f'%height, width, name+drv) # draw any trace events found for data in testruns: for b in data.dmesg: phaselist = data.dmesg[b]['list'] for name in phaselist: dev = phaselist[name] if('traceevents' in dev): vprint('Debug trace events found for device %s' % name) vprint('%20s %20s %10s %8s' % ('action', \ 'name', 'time(ms)', 'length(ms)')) for e in dev['traceevents']: vprint('%20s %20s %10.3f %8.3f' % (e.action, \ e.name, e.time*1000, e.length*1000)) height = (100.0 - devtl.scaleH)/data.dmesg[b]['row'] top = '%.3f' % ((dev['row']*height) + devtl.scaleH) left = '%.3f' % (((e.time-t0)*100)/tTotal) width = '%.3f' % (e.length*100/tTotal) color = 'rgba(204,204,204,0.5)' devtl.html['timeline'] += \ html_traceevent.format(e.action+' '+e.name, \ left, top, '%.3f'%height, \ width, e.color, '') # timeline is finished devtl.html['timeline'] += '</div>\n</div>\n' # draw a legend which describes the phases by color data = testruns[-1] devtl.html['legend'] = '<div class="legend">\n' pdelta = 100.0/len(data.phases) pmargin = pdelta / 4.0 for phase in data.phases: order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin) name = string.replace(phase, '_', ' &nbsp;') devtl.html['legend'] += html_legend.format(order, \ data.dmesg[phase]['color'], name) devtl.html['legend'] += '</div>\n' hf = open(sysvals.htmlfile, 'w') thread_height = 0 # write the html header first (html head, css code, up to body start) html_header = 
'<!DOCTYPE html>\n<html>\n<head>\n\ <meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\ <title>AnalyzeSuspend</title>\n\ <style type=\'text/css\'>\n\ body {overflow-y: scroll;}\n\ .stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\ .callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\ .callgraph article * {padding-left: 28px;}\n\ h1 {color:black;font: bold 30px Times;}\n\ t0 {color:black;font: bold 30px Times;}\n\ t1 {color:black;font: 30px Times;}\n\ t2 {color:black;font: 25px Times;}\n\ t3 {color:black;font: 20px Times;white-space:nowrap;}\n\ t4 {color:black;font: bold 30px Times;line-height:60px;white-space:nowrap;}\n\ table {width:100%;}\n\ .gray {background-color:rgba(80,80,80,0.1);}\n\ .green {background-color:rgba(204,255,204,0.4);}\n\ .purple {background-color:rgba(128,0,128,0.2);}\n\ .yellow {background-color:rgba(255,255,204,0.4);}\n\ .time1 {font: 22px Arial;border:1px solid;}\n\ .time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\ td {text-align: center;}\n\ r {color:#500000;font:15px Tahoma;}\n\ n {color:#505050;font:15px Tahoma;}\n\ .tdhl {color: red;}\n\ .hide {display: none;}\n\ .pf {display: none;}\n\ .pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\ .pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" 
height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\ .pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\ .zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\ .timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\ .thread {position: absolute; height: '+'%.3f'%thread_height+'%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\ .thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\ .hover {background-color:white;border:1px solid red;z-index:10;}\n\ .traceevent {position: absolute;opacity: 0.3;height: '+'%.3f'%thread_height+'%;width:0;overflow:hidden;line-height:30px;text-align:center;white-space:nowrap;}\n\ .phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\ .phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\ .t {position:absolute;top:0%;height:100%;border-right:1px solid black;}\n\ .legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\ .legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\ button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\ .devlist {position:'+x2changes[1]+';width:190px;}\n\ #devicedetail {height:100px;box-shadow: 5px 5px 20px black;}\n\ </style>\n</head>\n<body>\n' hf.write(html_header) # write the test title and general info header if(sysvals.stamp['time'] != ""): hf.write(headline_stamp.format(sysvals.stamp['host'], sysvals.stamp['kernel'], sysvals.stamp['mode'], \ sysvals.stamp['time'])) # write the device timeline hf.write(devtl.html['timeline']) hf.write(devtl.html['legend']) hf.write('<div id="devicedetailtitle"></div>\n') hf.write('<div id="devicedetail" style="display:none;">\n') # draw the colored boxes for the 
device detail section for data in testruns: hf.write('<div id="devicedetail%d">\n' % data.testnumber) for b in data.phases: phase = data.dmesg[b] length = phase['end']-phase['start'] left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal) width = '%.3f' % ((length*100.0)/tTotal) hf.write(html_phaselet.format(b, left, width, \ data.dmesg[b]['color'])) hf.write('</div>\n') hf.write('</div>\n') # write the ftrace data (callgraph) data = testruns[-1] if(sysvals.usecallgraph): hf.write('<section id="callgraphs" class="callgraph">\n') # write out the ftrace data converted to html html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n' html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n' html_func_end = '</article>\n' html_func_leaf = '<article>{0} {1}</article>\n' num = 0 for p in data.phases: list = data.dmesg[p]['list'] for devname in data.sortedDevices(p): if('ftrace' not in list[devname]): continue name = devname if(devname in sysvals.altdevname): name = sysvals.altdevname[devname] devid = list[devname]['id'] cg = list[devname]['ftrace'] flen = '<r>(%.3f ms @ %.3f to %.3f)</r>' % \ ((cg.end - cg.start)*1000, cg.start*1000, cg.end*1000) hf.write(html_func_top.format(devid, data.dmesg[p]['color'], \ num, name+' '+p, flen)) num += 1 for line in cg.list: if(line.length < 0.000000001): flen = '' else: flen = '<n>(%.3f ms @ %.3f)</n>' % (line.length*1000, \ line.time*1000) if(line.freturn and line.fcall): hf.write(html_func_leaf.format(line.name, flen)) elif(line.freturn): hf.write(html_func_end) else: hf.write(html_func_start.format(num, line.name, flen)) num += 1 hf.write(html_func_end) hf.write('\n\n </section>\n') # write the footer and close addScriptCode(hf, testruns) hf.write('</body>\n</html>\n') hf.close() return True # Function: addScriptCode # Description: # Adds the javascript code to 
# Function: addScriptCode
# Description:
#	 Adds the javascript code to the output html
# Arguments:
#	 hf: the open html file pointer
#	 testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
	# timeline bounds in milliseconds, relative to the last test's suspend
	# point (t0 is negative: it is the start of the first test run)
	t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
	tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
	# create an array in javascript memory with the device details
	detail = '	var devtable = [];\n'
	for data in testruns:
		topo = data.deviceTopology()
		detail += '	devtable[%d] = "%s";\n' % (data.testnumber, topo)
	detail += '	var bounds = [%f,%f];\n' % (t0, tMax)
	# add the code which will manipulate the data in the browser
	# NOTE(review): the javascript below is emitted verbatim into the html
	# report; it wires up the zoom buttons, per-device hover/click handlers
	# and the popup device-list windows. Keep the string bytes unchanged.
	script_code = \
	'<script type="text/javascript">\n'+detail+\
	'	function zoomTimeline() {\n'\
	'		var timescale = document.getElementById("timescale");\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		var val = parseFloat(dmesg.style.width);\n'\
	'		var newval = 100;\n'\
	'		var sh = window.outerWidth / 2;\n'\
	'		if(this.id == "zoomin") {\n'\
	'			newval = val * 1.2;\n'\
	'			if(newval > 40000) newval = 40000;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else if (this.id == "zoomout") {\n'\
	'			newval = val / 1.2;\n'\
	'			if(newval < 100) newval = 100;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
	'		} else {\n'\
	'			zoombox.scrollLeft = 0;\n'\
	'			dmesg.style.width = "100%";\n'\
	'		}\n'\
	'		var html = "";\n'\
	'		var t0 = bounds[0];\n'\
	'		var tMax = bounds[1];\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var wTotal = tTotal * 100.0 / newval;\n'\
	'		for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
	'		if(tS < 1) tS = 1;\n'\
	'		for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
	'			var pos = (tMax - s) * 100.0 / tTotal;\n'\
	'			var name = (s == 0)?"S/R":(s+"ms");\n'\
	'			html += "<div class=\\"t\\" style=\\"right:"+pos+"%\\">"+name+"</div>";\n'\
	'		}\n'\
	'		timescale.innerHTML = html;\n'\
	'	}\n'\
	'	function deviceHover() {\n'\
	'		var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				dev[i].className = "thread hover";\n'\
	'			} else {\n'\
	'				dev[i].className = "thread";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceUnhover() {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].className = "thread";\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceTitle(title, total, cpu) {\n'\
	'		var prefix = "Total";\n'\
	'		if(total.length > 3) {\n'\
	'			prefix = "Average";\n'\
	'			total[1] = (total[1]+total[3])/2;\n'\
	'			total[2] = (total[2]+total[4])/2;\n'\
	'		}\n'\
	'		var devtitle = document.getElementById("devicedetailtitle");\n'\
	'		var name = title.slice(0, title.indexOf(" "));\n'\
	'		if(cpu >= 0) name = "CPU"+cpu;\n'\
	'		var driver = "";\n'\
	'		var tS = "<t2>(</t2>";\n'\
	'		var tR = "<t2>)</t2>";\n'\
	'		if(total[1] > 0)\n'\
	'			tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
	'		if(total[2] > 0)\n'\
	'			tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
	'		var s = title.indexOf("{");\n'\
	'		var e = title.indexOf("}");\n'\
	'		if((s >= 0) && (e >= 0))\n'\
	'			driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
	'		if(total[1] > 0 && total[2] > 0)\n'\
	'			devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
	'		else\n'\
	'			devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
	'		return name;\n'\
	'	}\n'\
	'	function deviceDetail() {\n'\
	'		var devinfo = document.getElementById("devicedetail");\n'\
	'		devinfo.style.display = "block";\n'\
	'		var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var idlist = [];\n'\
	'		var pdata = [[]];\n'\
	'		var pd = pdata[0];\n'\
	'		var total = [0.0, 0.0, 0.0];\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				idlist[idlist.length] = dev[i].id;\n'\
	'				var tidx = 1;\n'\
	'				if(dev[i].id[0] == "a") {\n'\
	'					pd = pdata[0];\n'\
	'				} else {\n'\
	'					if(pdata.length == 1) pdata[1] = [];\n'\
	'					if(total.length == 3) total[3]=total[4]=0.0;\n'\
	'					pd = pdata[1];\n'\
	'					tidx = 3;\n'\
	'				}\n'\
	'				var info = dev[i].title.split(" ");\n'\
	'				var pname = info[info.length-1];\n'\
	'				pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
	'				total[0] += pd[pname];\n'\
	'				if(pname.indexOf("suspend") >= 0)\n'\
	'					total[tidx] += pd[pname];\n'\
	'				else\n'\
	'					total[tidx+1] += pd[pname];\n'\
	'			}\n'\
	'		}\n'\
	'		var devname = deviceTitle(this.title, total, cpu);\n'\
	'		var left = 0.0;\n'\
	'		for (var t = 0; t < pdata.length; t++) {\n'\
	'			pd = pdata[t];\n'\
	'			devinfo = document.getElementById("devicedetail"+t);\n'\
	'			var phases = devinfo.getElementsByClassName("phaselet");\n'\
	'			for (var i = 0; i < phases.length; i++) {\n'\
	'				if(phases[i].id in pd) {\n'\
	'					var w = 100.0*pd[phases[i].id]/total[0];\n'\
	'					var fs = 32;\n'\
	'					if(w < 8) fs = 4*w | 0;\n'\
	'					var fs2 = fs*3/4;\n'\
	'					phases[i].style.width = w+"%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'					phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
	'					left += w;\n'\
	'					var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
	'					var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\
	'					phases[i].innerHTML = time+pname;\n'\
	'				} else {\n'\
	'					phases[i].style.width = "0%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'				}\n'\
	'			}\n'\
	'		}\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			if(idlist.indexOf(cg[i].id) >= 0) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function devListWindow(e) {\n'\
	'		var sx = e.clientX;\n'\
	'		if(sx > window.innerWidth - 440)\n'\
	'			sx = window.innerWidth - 440;\n'\
	'		var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\
	'		var win = window.open("", "_blank", cfg);\n'\
	'		if(window.chrome) win.moveBy(sx, 0);\n'\
	'		var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
	'			"<style type=\\"text/css\\">"+\n'\
	'			"   ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
	'			"</style>"\n'\
	'		var dt = devtable[0];\n'\
	'		if(e.target.id != "devlist1")\n'\
	'			dt = devtable[1];\n'\
	'		win.document.write(html+dt);\n'\
	'	}\n'\
	'	window.addEventListener("load", function () {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		dmesg.style.width = "100%"\n'\
	'		document.getElementById("zoomin").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomout").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
	'		var devlist = document.getElementsByClassName("devlist");\n'\
	'		for (var i = 0; i < devlist.length; i++)\n'\
	'			devlist[i].onclick = devListWindow;\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].onclick = deviceDetail;\n'\
	'			dev[i].onmouseover = deviceHover;\n'\
	'			dev[i].onmouseout = deviceUnhover;\n'\
	'		}\n'\
	'		zoomTimeline();\n'\
	'	});\n'\
	'</script>\n'
	hf.write(script_code);
# Function: executeSuspend
# Description:
#	 Execute system suspend through the sysfs interface, then copy the output
#	 dmesg and ftrace files to the test output directory.
def executeSuspend():
	global sysvals

	# refresh the device-name lookup table (quietly)
	detectUSB(False)
	t0 = time.time()*1000
	tp = sysvals.tpath
	# execute however many s/r runs requested
	for count in range(1,sysvals.execcount+1):
		# clear the kernel ring buffer just as we start
		os.system('dmesg -C')
		# enable callgraph ftrace only for the second run
		if(sysvals.usecallgraph and count == 2):
			# set trace type
			os.system('echo function_graph > '+tp+'current_tracer')
			os.system('echo "" > '+tp+'set_ftrace_filter')
			# set trace format options
			os.system('echo funcgraph-abstime > '+tp+'trace_options')
			os.system('echo funcgraph-proc > '+tp+'trace_options')
			# focus only on device suspend and resume
			os.system('cat '+tp+'available_filter_functions | '+\
				'grep dpm_run_callback > '+tp+'set_graph_function')
		# if this is test2 and there's a delay, start here
		if(count > 1 and sysvals.x2delay > 0):
			# busy-wait (1ms granularity) until x2delay ms since last resume
			tN = time.time()*1000
			while (tN - t0) < sysvals.x2delay:
				tN = time.time()*1000
				time.sleep(0.001)
		# start ftrace
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			print('START TRACING')
			os.system('echo 1 > '+tp+'tracing_on')
		# initiate suspend
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo SUSPEND START > '+tp+'trace_marker')
		if(sysvals.rtcwake):
			print('SUSPEND START')
			print('will autoresume in %d seconds' % sysvals.rtcwaketime)
			sysvals.rtcWakeAlarm()
		else:
			print('SUSPEND START (press a key to resume)')
		# writing the mode to /sys/power/state blocks until resume
		pf = open(sysvals.powerfile, 'w')
		pf.write(sysvals.suspendmode)
		# execution will pause here
		pf.close()
		t0 = time.time()*1000
		# return from suspend
		print('RESUME COMPLETE')
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo RESUME COMPLETE > '+tp+'trace_marker')
		# see if there's firmware timing data to be had
		t = sysvals.postresumetime
		if(t > 0):
			print('Waiting %d seconds for POST-RESUME trace events...' % t)
			time.sleep(t)
		# stop ftrace
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			os.system('echo 0 > '+tp+'tracing_on')
			print('CAPTURING TRACE')
			writeDatafileHeader(sysvals.ftracefile)
			os.system('cat '+tp+'trace >> '+sysvals.ftracefile)
			os.system('echo "" > '+tp+'trace')
		# grab a copy of the dmesg output
		print('CAPTURING DMESG')
		writeDatafileHeader(sysvals.dmesgfile)
		os.system('dmesg -c >> '+sysvals.dmesgfile)

# Function: writeDatafileHeader
# Description:
#	 Write the test stamp (and optional firmware/post-resume info) to the
#	 top of a raw output file so it can be re-parsed later.
# Arguments:
#	 filename: the dmesg or ftrace output file to append the header to
def writeDatafileHeader(filename):
	global sysvals

	# fw is [suspend_ns, resume_ns] from the ACPI FPDT, or False
	fw = getFPDT(False)
	prt = sysvals.postresumetime
	fp = open(filename, 'a')
	fp.write(sysvals.teststamp+'\n')
	if(fw):
		fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
	if(prt > 0):
		fp.write('# post resume time %u\n' % prt)
	fp.close()
# Function: executeAndroidSuspend
# Description:
#	 Execute system suspend through the sysfs interface
#	 on a remote android device, then transfer the output
#	 dmesg and ftrace files to the local output directory.
def executeAndroidSuspend():
	global sysvals

	# check to see if the display is currently off
	tp = sysvals.tpath
	out = os.popen(sysvals.adb+\
		' shell dumpsys power | grep mScreenOn').read().strip()
	# if so we need to turn it on so we can issue a new suspend
	if(out.endswith('false')):
		print('Waking the device up for the test...')
		# send the KEYPAD_POWER keyevent to wake it up
		os.system(sysvals.adb+' shell input keyevent 26')
		# wait a few seconds so the user can see the device wake up
		time.sleep(3)
	# execute however many s/r runs requested
	for count in range(1,sysvals.execcount+1):
		# clear the kernel ring buffer just as we start
		os.system(sysvals.adb+' shell dmesg -c > /dev/null 2>&1')
		# start ftrace
		if(sysvals.usetraceevents):
			print('START TRACING')
			os.system(sysvals.adb+" shell 'echo 1 > "+tp+"tracing_on'")
		# initiate suspend
		# (a duplicated nested "for count in range(...)" loop used to start
		# here; it reused the loop variable and would have initiated
		# execcount suspends per outer iteration, so it was removed to
		# match executeSuspend's single loop)
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+\
				" shell 'echo SUSPEND START > "+tp+"trace_marker'")
		print('SUSPEND START (press a key on the device to resume)')
		os.system(sysvals.adb+" shell 'echo "+sysvals.suspendmode+\
			" > "+sysvals.powerfile+"'")
		# execution will pause here, then adb will exit
		while(True):
			check = os.popen(sysvals.adb+\
				' shell pwd 2>/dev/null').read().strip()
			if(len(check) > 0):
				break
			time.sleep(1)
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+" shell 'echo RESUME COMPLETE > "+tp+\
				"trace_marker'")
		# return from suspend
		print('RESUME COMPLETE')
		# stop ftrace
		if(sysvals.usetraceevents):
			os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
			print('CAPTURING TRACE')
			os.system('echo "'+sysvals.teststamp+'" > '+sysvals.ftracefile)
			os.system(sysvals.adb+' shell cat '+tp+\
				'trace >> '+sysvals.ftracefile)
		# grab a copy of the dmesg output
		print('CAPTURING DMESG')
		os.system('echo "'+sysvals.teststamp+'" > '+sysvals.dmesgfile)
		os.system(sysvals.adb+' shell dmesg >> '+sysvals.dmesgfile)

# Function: setUSBDevicesAuto
# Description:
#	 Set the autosuspend control parameter of all USB devices to auto
#	 This can be dangerous, so use at your own risk, most devices are set
#	 to always-on since the kernel cant determine if the device can
#	 properly autosuspend
def setUSBDevicesAuto():
	global sysvals

	rootCheck()
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		if(re.match('.*/usb[0-9]*.*', dirname) and
			'idVendor' in filenames and 'idProduct' in filenames):
			os.system('echo auto > %s/power/control' % dirname)
			name = dirname.split('/')[-1]
			desc = os.popen('cat %s/product 2>/dev/null' % \
				dirname).read().replace('\n', '')
			ctrl = os.popen('cat %s/power/control 2>/dev/null' % \
				dirname).read().replace('\n', '')
			print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
#	 Print out an equivalent Y or N for a set of known parameter values
# Output:
#	 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
	yesvals = ['auto', 'enabled', 'active', '1']
	novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported']
	if val in yesvals:
		return 'Y'
	elif val in novals:
		return 'N'
	return ' '

# Function: ms2nice
# Description:
#	 Print out a very concise time string in minutes and seconds
# Output:
#	 The time string, e.g. "1901m16s"
def ms2nice(val):
	ms = 0
	try:
		ms = int(val)
	except:
		# non-numeric input: return a zero time string so the return type
		# is consistent (previously this returned the float 0.0, which
		# broke column alignment in detectUSB's table output)
		return '%3dm%2ds' % (0, 0)
	# use floor division so the arithmetic is correct under both
	# python 2 and python 3 (true division would produce floats)
	m = ms // 60000
	s = (ms // 1000) - (m * 60)
	return '%3dm%2ds' % (m, s)
# Function: detectUSB
# Description:
#	 Detect all the USB hosts and devices currently connected and add
#	 a list of USB device names to sysvals for better timeline readability
# Arguments:
#	 output: True to output the info to stdout, False otherwise
def detectUSB(output):
	global sysvals

	# attributes read from each device's sysfs directory
	field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
	power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
			 'control':'', 'persist':'', 'runtime_enabled':'',
			 'runtime_status':'', 'runtime_usage':'',
			'runtime_active_time':'',
			'runtime_suspended_time':'',
			'active_duration':'',
			'connected_duration':''}
	if(output):
		print('LEGEND')
		print('---------------------------------------------------------------------------------------------')
		print('  A = async/sync PM queue Y/N                       D = autosuspend delay (seconds)')
		print('  S = autosuspend Y/N                         rACTIVE = runtime active (min/sec)')
		print('  P = persist across suspend Y/N              rSUSPEN = runtime suspend (min/sec)')
		print('  E = runtime suspend enabled/forbidden Y/N    ACTIVE = active duration (min/sec)')
		print('  R = runtime status active/suspended Y/N     CONNECT = connected duration (min/sec)')
		print('  U = runtime usage count')
		print('---------------------------------------------------------------------------------------------')
		print('  NAME       ID      DESCRIPTION         SPEED A S P E R U D rACTIVE rSUSPEN  ACTIVE CONNECT')
		print('---------------------------------------------------------------------------------------------')

	# a usb device directory is identified by the presence of both
	# idVendor and idProduct attribute files
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		if(re.match('.*/usb[0-9]*.*', dirname) and
			'idVendor' in filenames and 'idProduct' in filenames):
			for i in field:
				field[i] = os.popen('cat %s/%s 2>/dev/null' % \
					(dirname, i)).read().replace('\n', '')
			name = dirname.split('/')[-1]
			# prefer the human-readable product string in the timeline
			if(len(field['product']) > 0):
				sysvals.altdevname[name] = \
					'%s [%s]' % (field['product'], name)
			else:
				sysvals.altdevname[name] = \
					'%s:%s [%s]' % (field['idVendor'], \
						field['idProduct'], name)
			if(output):
				for i in power:
					power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \
						(dirname, i)).read().replace('\n', '')
				# usb root hubs are left-aligned, devices right-aligned
				if(re.match('usb[0-9]*', name)):
					first = '%-8s' % name
				else:
					first = '%8s' % name
				print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
					(first, field['idVendor'], field['idProduct'], \
					field['product'][0:20], field['speed'], \
					yesno(power['async']), \
					yesno(power['control']), \
					yesno(power['persist']), \
					yesno(power['runtime_enabled']), \
					yesno(power['runtime_status']), \
					power['runtime_usage'], \
					power['autosuspend'], \
					ms2nice(power['runtime_active_time']), \
					ms2nice(power['runtime_suspended_time']), \
					ms2nice(power['active_duration']), \
					ms2nice(power['connected_duration'])))

# Function: getModes
# Description:
#	 Determine the supported power modes on this system
# Output:
#	 A string list of the available modes
def getModes():
	global sysvals
	modes = ''
	if(not sysvals.android):
		# local host: read /sys/power/state directly
		if(os.path.exists(sysvals.powerfile)):
			fp = open(sysvals.powerfile, 'r')
			modes = string.split(fp.read())
			fp.close()
	else:
		# android target: read it over adb
		line = os.popen(sysvals.adb+' shell cat '+\
			sysvals.powerfile).read().strip()
		modes = string.split(line)
	return modes
# Function: getFPDT
# Description:
#	 Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
#	 output: True to output the info to stdout, False otherwise
# Output:
#	 [fwsuspend_ns, fwresume_ns] on success, False otherwise
def getFPDT(output):
	global sysvals

	rectype = {}
	rectype[0] = 'Firmware Basic Boot Performance Record'
	rectype[1] = 'S3 Performance Table Record'
	prectype = {}
	prectype[0] = 'Basic S3 Resume Performance Record'
	prectype[1] = 'Basic S3 Suspend Performance Record'

	rootCheck()
	if(not os.path.exists(sysvals.fpdtpath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.access(sysvals.fpdtpath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.path.exists(sysvals.mempath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.mempath, False)
		return False
	if(not os.access(sysvals.mempath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.mempath, False)
		return False

	fp = open(sysvals.fpdtpath, 'rb')
	buf = fp.read()
	fp.close()

	# the FPDT header is 36 bytes (ACPI standard table header)
	if(len(buf) < 36):
		if(output):
			doError('Invalid FPDT table data, should '+\
				'be at least 36 bytes', False)
		return False

	table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
	if(output):
		print('')
		print('Firmware Performance Data Table (%s)' % table[0])
		print('                  Signature : %s' % table[0])
		print('               Table Length : %u' % table[1])
		print('                   Revision : %u' % table[2])
		print('                   Checksum : 0x%x' % table[3])
		print('                     OEM ID : %s' % table[4])
		print('               OEM Table ID : %s' % table[5])
		print('               OEM Revision : %u' % table[6])
		print('                 Creator ID : %s' % table[7])
		print('           Creator Revision : 0x%x' % table[8])
		print('')

	if(table[0] != 'FPDT'):
		if(output):
			# was doError('Invalid FPDT table') - missing the required
			# second argument, which raised a TypeError at runtime
			doError('Invalid FPDT table', False)
		return False
	if(len(buf) <= 36):
		return False

	# walk the performance records following the header; each record points
	# at a physical-memory table which is read through /dev/mem
	i = 0
	fwData = [0, 0]
	records = buf[36:]
	fp = open(sysvals.mempath, 'rb')
	while(i < len(records)):
		header = struct.unpack('HBB', records[i:i+4])
		if(header[1] == 0):
			# a zero-length record can never advance; bail out
			break
		if(header[0] not in rectype):
			# skip unknown record types (a bare continue here used to
			# leave i unchanged and loop forever)
			i += header[1]
			continue
		if(header[1] != 16):
			# skip records of unexpected size (same infinite-loop fix)
			i += header[1]
			continue
		addr = struct.unpack('Q', records[i+8:i+16])[0]
		try:
			fp.seek(addr)
			first = fp.read(8)
		except:
			doError('Bad address 0x%x in %s' % (addr, sysvals.mempath), False)
		rechead = struct.unpack('4sI', first)
		recdata = fp.read(rechead[1]-8)
		if(rechead[0] == 'FBPT'):
			record = struct.unpack('HBBIQQQQQ', recdata)
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
				print('                  Reset END : %u ns' % record[4])
				print('  OS Loader LoadImage Start : %u ns' % record[5])
				print(' OS Loader StartImage Start : %u ns' % record[6])
				print('     ExitBootServices Entry : %u ns' % record[7])
				print('      ExitBootServices Exit : %u ns' % record[8])
		elif(rechead[0] == 'S3PT'):
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
			j = 0
			while(j < len(recdata)):
				prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[1] == 0):
					break
				if(prechead[0] not in prectype):
					# skip unknown sub-records (a bare continue here
					# used to leave j unchanged and loop forever)
					j += prechead[1]
					continue
				if(prechead[0] == 0):
					record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
					fwData[1] = record[2]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               Resume Count : %u' % \
							record[1])
						print('                 FullResume : %u ns' % \
							record[2])
						print('              AverageResume : %u ns' % \
							record[3])
				elif(prechead[0] == 1):
					record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
					fwData[0] = record[1] - record[0]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               SuspendStart : %u ns' % \
							record[0])
						print('                 SuspendEnd : %u ns' % \
							record[1])
						print('                SuspendTime : %u ns' % \
							fwData[0])
				j += prechead[1]
		if(output):
			print('')
		i += header[1]
	fp.close()
	return fwData
# Function: statusCheck
# Description:
#	 Verify that the requested command and options will work, and
#	 print the results to the terminal
# Output:
#	 True if the test will work, False if not
def statusCheck():
	global sysvals
	status = True

	if(sysvals.android):
		print('Checking the android system ...')
	else:
		print('Checking this system (%s)...' % platform.node())

	# check if adb is connected to a device
	if(sysvals.android):
		res = 'NO'
		out = os.popen(sysvals.adb+' get-state').read().strip()
		if(out == 'device'):
			res = 'YES'
		print('    is android device connected: %s' % res)
		if(res != 'YES'):
			print('    Please connect the device before using this tool')
			return False

	# check we have root access
	res = 'NO (No features of this tool will work!)'
	if(sysvals.android):
		out = os.popen(sysvals.adb+' shell id').read().strip()
		if('root' in out):
			res = 'YES'
	else:
		if(os.environ['USER'] == 'root'):
			res = 'YES'
	print('    have root access: %s' % res)
	if(res != 'YES'):
		if(sysvals.android):
			print('    Try running "adb root" to restart the daemon as root')
		else:
			print('    Try running this script with sudo')
		return False

	# check sysfs is mounted
	res = 'NO (No features of this tool will work!)'
	if(sysvals.android):
		out = os.popen(sysvals.adb+' shell ls '+\
			sysvals.powerfile).read().strip()
		if(out == sysvals.powerfile):
			res = 'YES'
	else:
		if(os.path.exists(sysvals.powerfile)):
			res = 'YES'
	print('    is sysfs mounted: %s' % res)
	if(res != 'YES'):
		return False

	# check target mode is a valid mode
	res = 'NO'
	modes = getModes()
	if(sysvals.suspendmode in modes):
		res = 'YES'
	else:
		status = False
	print('    is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
	if(res == 'NO'):
		print('      valid power modes are: %s' % modes)
		print('      please choose one with -m')

	# check if the tool can unlock the device
	if(sysvals.android):
		res = 'YES'
		out1 = os.popen(sysvals.adb+\
			' shell dumpsys power | grep mScreenOn').read().strip()
		out2 = os.popen(sysvals.adb+\
			' shell input').read().strip()
		if(not out1.startswith('mScreenOn') or not out2.startswith('usage')):
			res = 'NO (wake the android device up before running the test)'
		print('    can I unlock the screen: %s' % res)

	# check if ftrace is available
	res = 'NO'
	ftgood = verifyFtrace()
	if(ftgood):
		res = 'YES'
	elif(sysvals.usecallgraph):
		# callgraph mode cannot work without ftrace
		status = False
	print('    is ftrace supported: %s' % res)

	# what data source are we using
	res = 'DMESG'
	if(ftgood):
		# probe each required trace event; fall back to dmesg parsing when
		# any are missing
		sysvals.usetraceeventsonly = True
		sysvals.usetraceevents = False
		for e in sysvals.traceevents:
			check = False
			if(sysvals.android):
				out = os.popen(sysvals.adb+' shell ls -d '+\
					sysvals.epath+e).read().strip()
				if(out == sysvals.epath+e):
					check = True
			else:
				if(os.path.exists(sysvals.epath+e)):
					check = True
			if(not check):
				sysvals.usetraceeventsonly = False
			if(e == 'suspend_resume' and check):
				sysvals.usetraceevents = True
		if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
			res = 'FTRACE (all trace events found)'
		elif(sysvals.usetraceevents):
			res = 'DMESG and FTRACE (suspend_resume trace event found)'
	print('    timeline data source: %s' % res)

	# check if rtcwake
	res = 'NO'
	if(sysvals.rtcpath != ''):
		res = 'YES'
	elif(sysvals.rtcwake):
		# rtcwake was requested but no rtc device was found
		status = False
	print('    is rtcwake supported: %s' % res)

	return status
# Function: doError
# Description:
#	 generic error function for catastrphic failures
# Arguments:
#	 msg: the error message to print
#	 help: True if printHelp should be called after, False otherwise
def doError(msg, help):
	if(help == True):
		printHelp()
	# format the message inside the call: the old "print('...') % msg" form
	# only works with the python 2 print statement and raises a TypeError
	# under print-function semantics
	print('ERROR: %s\n' % msg)
	sys.exit()

# Function: doWarning
# Description:
#	 generic warning function for non-catastrophic anomalies
# Arguments:
#	 msg: the warning message to print
#	 file: If not empty, a filename to request be sent to the owner for debug
def doWarning(msg, file):
	# same print-statement-only idiom fixed here as in doError
	print('/* %s */' % msg)
	if(file):
		print('/* For a fix, please send this'+\
			' %s file to <todd.e.brandt@intel.com> */' % file)

# Function: rootCheck
# Description:
#	 quick check to see if we have root access
def rootCheck():
	# use .get() so a missing USER env var (e.g. under cron) reports
	# "not root" instead of crashing with a KeyError
	if(os.environ.get('USER') != 'root'):
		doError('This script must be run as root', False)

# Function: getArgInt
# Description:
#	 pull out an integer argument from the command line with checks
# Arguments:
#	 name: the option name, used in error messages
#	 args: an iterator over the remaining command line arguments
#	 min, max: the inclusive range the value must fall in
# Output:
#	 the parsed integer value (exits via doError on any failure)
def getArgInt(name, args, min, max):
	try:
		# the next() builtin works on both python 2 and 3 iterators,
		# unlike the python-2-only args.next() method
		arg = next(args)
	except:
		doError(name+': no argument supplied', True)
	try:
		val = int(arg)
	except:
		doError(name+': non-integer value given', True)
	if(val < min or val > max):
		doError(name+': value should be between %d and %d' % (min, max), True)
	return val
# Function: rerunTest
# Description:
#	 generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
	global sysvals

	if(sysvals.ftracefile != ''):
		doesTraceLogHaveTraceEvents()
	if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
		doError('recreating this html output '+\
			'requires a dmesg file', False)
	sysvals.setOutputFile()
	vprint('Output file: %s' % sysvals.htmlfile)

	# process the log data
	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		# all the timeline data is in the ftrace log
		testruns = parseTraceLog()
	else:
		# dmesg is the primary source; ftrace fills in gaps if present
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.ftracefile != ''):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)

# Function: runTest
# Description:
#	 execute a suspend/resume, gather the logs, and generate the output
# Arguments:
#	 subdir: the subdirectory to create the test output files in
def runTest(subdir):
	global sysvals

	# prepare for the test
	if(not sysvals.android):
		initFtrace()
	else:
		initFtraceAndroid()
	sysvals.initTestOutput(subdir)

	vprint('Output files:\n    %s' % sysvals.dmesgfile)
	if(sysvals.usecallgraph or
		sysvals.usetraceevents or
		sysvals.usetraceeventsonly):
		vprint('    %s' % sysvals.ftracefile)
	vprint('    %s' % sysvals.htmlfile)

	# execute the test
	if(not sysvals.android):
		executeSuspend()
	else:
		executeAndroidSuspend()

	# analyze the data and create the html output
	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		# data for kernels 3.15 or newer is entirely in ftrace
		testruns = parseTraceLog()
	else:
		# data for kernels older than 3.15 is primarily in dmesg
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)
filename)): files.append("%s/%s" % (dirname, filename)) # process the files in order and get an array of data objects testruns = [] for file in sorted(files): if output: print("Test found in %s" % os.path.dirname(file)) sysvals.ftracefile = file sysvals.dmesgfile = file.replace('_ftrace.txt', '_dmesg.txt') doesTraceLogHaveTraceEvents() sysvals.usecallgraph = False if not sysvals.usetraceeventsonly: if(not os.path.exists(sysvals.dmesgfile)): print("Skipping %s: not a valid test input" % file) continue else: if output: f = os.path.basename(sysvals.ftracefile) d = os.path.basename(sysvals.dmesgfile) print("\tInput files: %s and %s" % (f, d)) testdata = loadKernelLog() data = testdata[0] parseKernelLog(data) testdata = [data] appendIncompleteTraceLog(testdata) else: if output: print("\tInput file: %s" % os.path.basename(sysvals.ftracefile)) testdata = parseTraceLog() data = testdata[0] data.normalizeTime(data.tSuspended) link = file.replace(subdir+'/', '').replace('_ftrace.txt', '.html') data.outfile = link testruns.append(data) createHTMLSummarySimple(testruns, subdir+'/summary.html') # Function: printHelp # Description: # print out the help text def printHelp(): global sysvals modes = getModes() print('') print('AnalyzeSuspend v%.1f' % sysvals.version) print('Usage: sudo analyze_suspend.py <options>') print('') print('Description:') print(' This tool is designed to assist kernel and OS developers in optimizing') print(' their linux stack\'s suspend/resume time. Using a kernel image built') print(' with a few extra options enabled, the tool will execute a suspend and') print(' capture dmesg and ftrace data until resume is complete. 
This data is') print(' transformed into a device timeline and an optional callgraph to give') print(' a detailed view of which devices/subsystems are taking the most') print(' time in suspend/resume.') print('') print(' Generates output files in subdirectory: suspend-mmddyy-HHMMSS') print(' HTML output: <hostname>_<mode>.html') print(' raw dmesg output: <hostname>_<mode>_dmesg.txt') print(' raw ftrace output: <hostname>_<mode>_ftrace.txt') print('') print('Options:') print(' [general]') print(' -h Print this help text') print(' -v Print the current tool version') print(' -verbose Print extra information during execution and analysis') print(' -status Test to see if the system is enabled to run this tool') print(' -modes List available suspend modes') print(' -m mode Mode to initiate for suspend %s (default: %s)') % (modes, sysvals.suspendmode) print(' -rtcwake t Use rtcwake to autoresume after <t> seconds (default: disabled)') print(' [advanced]') print(' -f Use ftrace to create device callgraphs (default: disabled)') print(' -filter "d1 d2 ..." Filter out all but this list of dev names') print(' -x2 Run two suspend/resumes back to back (default: disabled)') print(' -x2delay t Minimum millisecond delay <t> between the two test runs (default: 0 ms)') print(' -postres t Time after resume completion to wait for post-resume events (default: 0 S)') print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. 
The outputs will') print(' be created in a new subdirectory with a summary page.') print(' [utilities]') print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table') print(' -usbtopo Print out the current USB topology with power info') print(' -usbauto Enable autosuspend for all connected USB devices') print(' [android testing]') print(' -adb binary Use the given adb binary to run the test on an android device.') print(' The device should already be connected and with root access.') print(' Commands will be executed on the device using "adb shell"') print(' [re-analyze data from previous runs]') print(' -ftrace ftracefile Create HTML output using ftrace input') print(' -dmesg dmesgfile Create HTML output using dmesg (not needed for kernel >= 3.15)') print(' -summary directory Create a summary of all test in this dir') print('') return True # ----------------- MAIN -------------------- # exec start (skipped if script is loaded as library) if __name__ == '__main__': cmd = '' cmdarg = '' multitest = {'run': False, 'count': 0, 'delay': 0} # loop through the command line arguments args = iter(sys.argv[1:]) for arg in args: if(arg == '-m'): try: val = args.next() except: doError('No mode supplied', True) sysvals.suspendmode = val elif(arg == '-adb'): try: val = args.next() except: doError('No adb binary supplied', True) if(not os.path.exists(val)): doError('file doesnt exist: %s' % val, False) if(not os.access(val, os.X_OK)): doError('file isnt executable: %s' % val, False) try: check = os.popen(val+' version').read().strip() except: doError('adb version failed to execute', False) if(not re.match('Android Debug Bridge .*', check)): doError('adb version failed to execute', False) sysvals.adb = val sysvals.android = True elif(arg == '-x2'): if(sysvals.postresumetime > 0): doError('-x2 is not compatible with -postres', False) sysvals.execcount = 2 elif(arg == '-x2delay'): sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000) elif(arg == '-postres'): 
if(sysvals.execcount != 1): doError('-x2 is not compatible with -postres', False) sysvals.postresumetime = getArgInt('-postres', args, 0, 3600) elif(arg == '-f'): sysvals.usecallgraph = True elif(arg == '-modes'): cmd = 'modes' elif(arg == '-fpdt'): cmd = 'fpdt' elif(arg == '-usbtopo'): cmd = 'usbtopo' elif(arg == '-usbauto'): cmd = 'usbauto' elif(arg == '-status'): cmd = 'status' elif(arg == '-verbose'): sysvals.verbose = True elif(arg == '-v'): print("Version %.1f" % sysvals.version) sys.exit() elif(arg == '-rtcwake'): sysvals.rtcwake = True sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600) elif(arg == '-multi'): multitest['run'] = True multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000) multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600) elif(arg == '-dmesg'): try: val = args.next() except: doError('No dmesg file supplied', True) sysvals.notestrun = True sysvals.dmesgfile = val if(os.path.exists(sysvals.dmesgfile) == False): doError('%s doesnt exist' % sysvals.dmesgfile, False) elif(arg == '-ftrace'): try: val = args.next() except: doError('No ftrace file supplied', True) sysvals.notestrun = True sysvals.usecallgraph = True sysvals.ftracefile = val if(os.path.exists(sysvals.ftracefile) == False): doError('%s doesnt exist' % sysvals.ftracefile, False) elif(arg == '-summary'): try: val = args.next() except: doError('No directory supplied', True) cmd = 'summary' cmdarg = val sysvals.notestrun = True if(os.path.isdir(val) == False): doError('%s isnt accesible' % val, False) elif(arg == '-filter'): try: val = args.next() except: doError('No devnames supplied', True) sysvals.setDeviceFilter(val) elif(arg == '-h'): printHelp() sys.exit() else: doError('Invalid argument: '+arg, True) # just run a utility command and exit if(cmd != ''): if(cmd == 'status'): statusCheck() elif(cmd == 'fpdt'): if(sysvals.android): doError('cannot read FPDT on android device', False) getFPDT(True) elif(cmd == 'usbtopo'): 
if(sysvals.android): doError('cannot read USB topology '+\ 'on an android device', False) detectUSB(True) elif(cmd == 'modes'): modes = getModes() print modes elif(cmd == 'usbauto'): setUSBDevicesAuto() elif(cmd == 'summary'): print("Generating a summary of folder \"%s\"" % cmdarg) runSummary(cmdarg, True) sys.exit() # run test on android device if(sysvals.android): if(sysvals.usecallgraph): doError('ftrace (-f) is not yet supported '+\ 'in the android kernel', False) if(sysvals.notestrun): doError('cannot analyze test files on the '+\ 'android device', False) # if instructed, re-analyze existing data files if(sysvals.notestrun): rerunTest() sys.exit() # verify that we can run a test if(not statusCheck()): print('Check FAILED, aborting the test run!') sys.exit() if multitest['run']: # run multiple tests in a separte subdirectory s = 'x%d' % multitest['count'] subdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S') os.mkdir(subdir) for i in range(multitest['count']): if(i != 0): print('Waiting %d seconds...' % (multitest['delay'])) time.sleep(multitest['delay']) print('TEST (%d/%d) START' % (i+1, multitest['count'])) runTest(subdir) print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count'])) runSummary(subdir, False) else: # run the test in the current directory runTest(".")
gpl-2.0
mbayon/TFG-MachineLearning
vbig/lib/python2.7/site-packages/sklearn/metrics/cluster/bicluster.py
359
2797
from __future__ import division import numpy as np from sklearn.utils.linear_assignment_ import linear_assignment from sklearn.utils.validation import check_consistent_length, check_array __all__ = ["consensus_score"] def _check_rows_and_columns(a, b): """Unpacks the row and column arrays and checks their shape.""" check_consistent_length(*a) check_consistent_length(*b) checks = lambda x: check_array(x, ensure_2d=False) a_rows, a_cols = map(checks, a) b_rows, b_cols = map(checks, b) return a_rows, a_cols, b_rows, b_cols def _jaccard(a_rows, a_cols, b_rows, b_cols): """Jaccard coefficient on the elements of the two biclusters.""" intersection = ((a_rows * b_rows).sum() * (a_cols * b_cols).sum()) a_size = a_rows.sum() * a_cols.sum() b_size = b_rows.sum() * b_cols.sum() return intersection / (a_size + b_size - intersection) def _pairwise_similarity(a, b, similarity): """Computes pairwise similarity matrix. result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j. """ a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b) n_a = a_rows.shape[0] n_b = b_rows.shape[0] result = np.array(list(list(similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)) for i in range(n_a))) return result def consensus_score(a, b, similarity="jaccard"): """The similarity of two sets of biclusters. Similarity between individual biclusters is computed. Then the best matching between sets is found using the Hungarian algorithm. The final score is the sum of similarities divided by the size of the larger set. Read more in the :ref:`User Guide <biclustering>`. Parameters ---------- a : (rows, columns) Tuple of row and column indicators for a set of biclusters. b : (rows, columns) Another set of biclusters like ``a``. 
similarity : string or function, optional, default: "jaccard" May be the string "jaccard" to use the Jaccard coefficient, or any function that takes four arguments, each of which is a 1d indicator vector: (a_rows, a_columns, b_rows, b_columns). References ---------- * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis for bicluster acquisition <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__. """ if similarity == "jaccard": similarity = _jaccard matrix = _pairwise_similarity(a, b, similarity) indices = linear_assignment(1. - matrix) n_a = len(a[0]) n_b = len(b[0]) return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
mit
Denisolt/IEEE-NYIT-MA
local/lib/python2.7/site-packages/django/contrib/gis/gdal/prototypes/raster.py
320
4013
""" This module houses the ctypes function prototypes for GDAL DataSource (raster) related data structures. """ from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p from functools import partial from django.contrib.gis.gdal.libgdal import GDAL_VERSION, std_call from django.contrib.gis.gdal.prototypes.generation import ( const_string_output, double_output, int_output, void_output, voidptr_output, ) # For more detail about c function names and definitions see # http://gdal.org/gdal_8h.html # http://gdal.org/gdalwarper_8h.html # Prepare partial functions that use cpl error codes void_output = partial(void_output, cpl=True) const_string_output = partial(const_string_output, cpl=True) double_output = partial(double_output, cpl=True) # Raster Driver Routines register_all = void_output(std_call('GDALAllRegister'), []) get_driver = voidptr_output(std_call('GDALGetDriver'), [c_int]) get_driver_by_name = voidptr_output(std_call('GDALGetDriverByName'), [c_char_p], errcheck=False) get_driver_count = int_output(std_call('GDALGetDriverCount'), []) get_driver_description = const_string_output(std_call('GDALGetDescription'), [c_void_p]) # Raster Data Source Routines create_ds = voidptr_output(std_call('GDALCreate'), [c_void_p, c_char_p, c_int, c_int, c_int, c_int, c_void_p]) open_ds = voidptr_output(std_call('GDALOpen'), [c_char_p, c_int]) if GDAL_VERSION >= (2, 0): close_ds = voidptr_output(std_call('GDALClose'), [c_void_p]) else: close_ds = void_output(std_call('GDALClose'), [c_void_p]) flush_ds = int_output(std_call('GDALFlushCache'), [c_void_p]) copy_ds = voidptr_output(std_call('GDALCreateCopy'), [c_void_p, c_char_p, c_void_p, c_int, POINTER(c_char_p), c_void_p, c_void_p] ) add_band_ds = void_output(std_call('GDALAddBand'), [c_void_p, c_int]) get_ds_description = const_string_output(std_call('GDALGetDescription'), [c_void_p]) get_ds_driver = voidptr_output(std_call('GDALGetDatasetDriver'), [c_void_p]) get_ds_xsize = int_output(std_call('GDALGetRasterXSize'), 
[c_void_p]) get_ds_ysize = int_output(std_call('GDALGetRasterYSize'), [c_void_p]) get_ds_raster_count = int_output(std_call('GDALGetRasterCount'), [c_void_p]) get_ds_raster_band = voidptr_output(std_call('GDALGetRasterBand'), [c_void_p, c_int]) get_ds_projection_ref = const_string_output(std_call('GDALGetProjectionRef'), [c_void_p]) set_ds_projection_ref = void_output(std_call('GDALSetProjection'), [c_void_p, c_char_p]) get_ds_geotransform = void_output(std_call('GDALGetGeoTransform'), [c_void_p, POINTER(c_double * 6)], errcheck=False) set_ds_geotransform = void_output(std_call('GDALSetGeoTransform'), [c_void_p, POINTER(c_double * 6)]) # Raster Band Routines band_io = void_output(std_call('GDALRasterIO'), [c_void_p, c_int, c_int, c_int, c_int, c_int, c_void_p, c_int, c_int, c_int, c_int, c_int] ) get_band_xsize = int_output(std_call('GDALGetRasterBandXSize'), [c_void_p]) get_band_ysize = int_output(std_call('GDALGetRasterBandYSize'), [c_void_p]) get_band_index = int_output(std_call('GDALGetBandNumber'), [c_void_p]) get_band_description = const_string_output(std_call('GDALGetDescription'), [c_void_p]) get_band_ds = voidptr_output(std_call('GDALGetBandDataset'), [c_void_p]) get_band_datatype = int_output(std_call('GDALGetRasterDataType'), [c_void_p]) get_band_nodata_value = double_output(std_call('GDALGetRasterNoDataValue'), [c_void_p, POINTER(c_int)]) set_band_nodata_value = void_output(std_call('GDALSetRasterNoDataValue'), [c_void_p, c_double]) get_band_minimum = double_output(std_call('GDALGetRasterMinimum'), [c_void_p, POINTER(c_int)]) get_band_maximum = double_output(std_call('GDALGetRasterMaximum'), [c_void_p, POINTER(c_int)]) # Reprojection routine reproject_image = void_output(std_call('GDALReprojectImage'), [c_void_p, c_char_p, c_void_p, c_char_p, c_int, c_double, c_double, c_void_p, c_void_p, c_void_p] ) auto_create_warped_vrt = voidptr_output(std_call('GDALAutoCreateWarpedVRT'), [c_void_p, c_char_p, c_char_p, c_int, c_double, c_void_p] )
gpl-3.0
molebot/brython
www/src/Lib/test/test_osx_env.py
112
1342
""" Test suite for OS X interpreter environment variables. """ from test.support import EnvironmentVarGuard, run_unittest import subprocess import sys import sysconfig import unittest @unittest.skipUnless(sys.platform == 'darwin' and sysconfig.get_config_var('WITH_NEXT_FRAMEWORK'), 'unnecessary on this platform') class OSXEnvironmentVariableTestCase(unittest.TestCase): def _check_sys(self, ev, cond, sv, val = sys.executable + 'dummy'): with EnvironmentVarGuard() as evg: subpc = [str(sys.executable), '-c', 'import sys; sys.exit(2 if "%s" %s %s else 3)' % (val, cond, sv)] # ensure environment variable does not exist evg.unset(ev) # test that test on sys.xxx normally fails rc = subprocess.call(subpc) self.assertEqual(rc, 3, "expected %s not %s %s" % (ev, cond, sv)) # set environ variable evg.set(ev, val) # test that sys.xxx has been influenced by the environ value rc = subprocess.call(subpc) self.assertEqual(rc, 2, "expected %s %s %s" % (ev, cond, sv)) def test_pythonexecutable_sets_sys_executable(self): self._check_sys('PYTHONEXECUTABLE', '==', 'sys.executable') if __name__ == "__main__": unittest.main()
bsd-3-clause
hhbyyh/spark
python/pyspark/sql/group.py
24
12490
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import *

__all__ = ["GroupedData"]


def dfapi(f):
    # Decorator for no-argument aggregations: forwards the call to the JVM
    # GroupedData method of the same name and wraps the result in a
    # Python DataFrame. The decorated Python body is never executed; only
    # its name and docstring are used.
    def _api(self):
        name = f.__name__
        jdf = getattr(self._jgd, name)()
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api


def df_varargs_api(f):
    # Same as dfapi, but for aggregations taking a variable number of
    # column-name arguments, which are converted to a JVM Seq.
    def _api(self, *cols):
        name = f.__name__
        jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api


class GroupedData(object):
    """
    A set of methods for aggregations on a :class:`DataFrame`,
    created by :func:`DataFrame.groupBy`.

    .. note:: Experimental

    .. versionadded:: 1.3
    """

    def __init__(self, jgd, df):
        # jgd: the py4j handle to the JVM RelationalGroupedDataset;
        # df: the parent DataFrame, kept for its columns and SQL context.
        self._jgd = jgd
        self._df = df
        self.sql_ctx = df.sql_ctx

    @ignore_unicode_prefix
    @since(1.3)
    def agg(self, *exprs):
        """Compute aggregates and returns the result as a :class:`DataFrame`.

        The available aggregate functions can be:

        1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`

        2. group aggregate pandas UDFs, created with
           :func:`pyspark.sql.functions.pandas_udf`

           .. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
               a full shuffle is required. Also, all the data of a group will be loaded
               into memory, so the user should be aware of the potential OOM risk if
               data is skewed and certain groups are too large to fit in memory.

           .. seealso:: :func:`pyspark.sql.functions.pandas_udf`

        If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
        is the column to perform aggregation on, and the value is the aggregate function.

        Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.

        .. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot
            be mixed in a single call to this function.

        :param exprs: a dict mapping from column name (string) to aggregate functions
            (string), or a list of :class:`Column`.

        >>> gdf = df.groupBy(df.name)
        >>> sorted(gdf.agg({"*": "count"}).collect())
        [Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]

        >>> from pyspark.sql import functions as F
        >>> sorted(gdf.agg(F.min(df.age)).collect())
        [Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]

        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> @pandas_udf('int', PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
        ... def min_udf(v):
        ...     return v.min()
        >>> sorted(gdf.agg(min_udf(df.age)).collect())  # doctest: +SKIP
        [Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
        """
        assert exprs, "exprs should not be empty"
        if len(exprs) == 1 and isinstance(exprs[0], dict):
            # dict form: {column: aggregate-function-name}
            jdf = self._jgd.agg(exprs[0])
        else:
            # Columns: the JVM API takes the first expression plus a Seq of the rest
            assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
            jdf = self._jgd.agg(exprs[0]._jc,
                                _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
        return DataFrame(jdf, self.sql_ctx)

    @dfapi
    @since(1.3)
    def count(self):
        """Counts the number of records for each group.

        >>> sorted(df.groupBy(df.age).count().collect())
        [Row(age=2, count=1), Row(age=5, count=1)]
        """

    @df_varargs_api
    @since(1.3)
    def mean(self, *cols):
        """Computes average values for each numeric columns for each group.

        :func:`mean` is an alias for :func:`avg`.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().mean('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().mean('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """

    @df_varargs_api
    @since(1.3)
    def avg(self, *cols):
        """Computes average values for each numeric columns for each group.

        :func:`mean` is an alias for :func:`avg`.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().avg('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().avg('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """

    @df_varargs_api
    @since(1.3)
    def max(self, *cols):
        """Computes the max value for each numeric columns for each group.

        >>> df.groupBy().max('age').collect()
        [Row(max(age)=5)]
        >>> df3.groupBy().max('age', 'height').collect()
        [Row(max(age)=5, max(height)=85)]
        """

    @df_varargs_api
    @since(1.3)
    def min(self, *cols):
        """Computes the min value for each numeric column for each group.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().min('age').collect()
        [Row(min(age)=2)]
        >>> df3.groupBy().min('age', 'height').collect()
        [Row(min(age)=2, min(height)=80)]
        """

    @df_varargs_api
    @since(1.3)
    def sum(self, *cols):
        """Compute the sum for each numeric columns for each group.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().sum('age').collect()
        [Row(sum(age)=7)]
        >>> df3.groupBy().sum('age', 'height').collect()
        [Row(sum(age)=7, sum(height)=165)]
        """

    @since(1.6)
    def pivot(self, pivot_col, values=None):
        """
        Pivots a column of the current :class:`DataFrame` and perform the specified
        aggregation. There are two versions of pivot function: one that requires the
        caller to specify the list of distinct values to pivot on, and one that does
        not. The latter is more concise but less efficient, because Spark needs to
        first compute the list of distinct values internally.

        :param pivot_col: Name of the column to pivot.
        :param values: List of values that will be translated to columns in the
            output DataFrame.

        # Compute the sum of earnings for each year by course with each course as a separate column

        >>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
        [Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]

        # Or without specifying column values (less efficient)

        >>> df4.groupBy("year").pivot("course").sum("earnings").collect()
        [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
        >>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
        [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
        """
        if values is None:
            # Spark computes the distinct pivot values itself (extra job).
            jgd = self._jgd.pivot(pivot_col)
        else:
            jgd = self._jgd.pivot(pivot_col, values)
        return GroupedData(jgd, self._df)

    @since(2.3)
    def apply(self, udf):
        """
        Maps each group of the current :class:`DataFrame` using a pandas udf and
        returns the result as a `DataFrame`.

        The user-defined function should take a `pandas.DataFrame` and return another
        `pandas.DataFrame`. For each group, all columns are passed together as a
        `pandas.DataFrame` to the user-function and the returned `pandas.DataFrame`
        are combined as a :class:`DataFrame`.

        The returned `pandas.DataFrame` can be of arbitrary length and its schema must
        match the returnType of the pandas udf.

        .. note:: This function requires a full shuffle. all the data of a group will
            be loaded into memory, so the user should be aware of the potential OOM
            risk if data is skewed and certain groups are too large to fit in memory.

        .. note:: Experimental

        :param udf: a grouped map user-defined function returned by
            :func:`pyspark.sql.functions.pandas_udf`.

        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ...     ("id", "v"))
        >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)  # doctest: +SKIP
        ... def normalize(pdf):
        ...     v = pdf.v
        ...     return pdf.assign(v=(v - v.mean()) / v.std())
        >>> df.groupby("id").apply(normalize).show()  # doctest: +SKIP
        +---+-------------------+
        | id|                  v|
        +---+-------------------+
        |  1|-0.7071067811865475|
        |  1| 0.7071067811865475|
        |  2|-0.8320502943378437|
        |  2|-0.2773500981126146|
        |  2| 1.1094003924504583|
        +---+-------------------+

        .. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
        """
        # Columns are special because hasattr always return True
        if isinstance(udf, Column) or not hasattr(udf, 'func') \
                or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
            raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
                             "GROUPED_MAP.")
        df = self._df
        # Bind the udf to every column of the parent frame so the whole
        # group is passed to the python worker as one pandas.DataFrame.
        udf_column = udf(*[df[col] for col in df.columns])
        jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
        return DataFrame(jdf, self.sql_ctx)


def _test():
    # Run the module doctests against a local 4-core SparkSession.
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.group tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
                                   Row(name='Bob', age=5, height=85)]).toDF()
    globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
                                   Row(course="Java", year=2012, earnings=20000),
                                   Row(course="dotNET", year=2012, earnings=5000),
                                   Row(course="dotNET", year=2013, earnings=48000),
                                   Row(course="Java", year=2013, earnings=30000)]).toDF()
    globs['df5'] = sc.parallelize([
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
        Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
        Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
        Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()

    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
apache-2.0
171121130/SWI
venv/Lib/site-packages/mako/ext/pygmentplugin.py
61
4530
# ext/pygmentplugin.py # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file> # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from pygments.lexers.web import \ HtmlLexer, XmlLexer, JavascriptLexer, CssLexer from pygments.lexers.agile import PythonLexer, Python3Lexer from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \ include, using from pygments.token import \ Text, Comment, Operator, Keyword, Name, String, Other from pygments.formatters.html import HtmlFormatter from pygments import highlight from mako import compat class MakoLexer(RegexLexer): name = 'Mako' aliases = ['mako'] filenames = ['*.mao'] tokens = { 'root': [ (r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)', bygroups(Text, Comment.Preproc, Keyword, Other)), (r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, using(PythonLexer), Other)), (r'(\s*)(##[^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, Other)), (r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc), (r'(<%)([\w\.\:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'), (r'(</%)([\w\.\:]+)(>)', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'), (r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(\$\{)(.*?)(\})', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'''(?sx) (.+?) 
# anything, followed by: (?: (?<=\n)(?=%(?!%)|\#\#) | # an eval or comment line (?=\#\*) | # multiline comment (?=</?%) | # a python block # call start or end (?=\$\{) | # a substitution (?<=\n)(?=\s*%) | # - don't consume (\\\n) | # an escaped newline \Z # end of string ) ''', bygroups(Other, Operator)), (r'\s+', Text), ], 'ondeftags': [ (r'<%', Comment.Preproc), (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin), include('tag'), ], 'tag': [ (r'((?:\w+)\s*=)\s*(".*?")', bygroups(Name.Attribute, String)), (r'/?\s*>', Comment.Preproc, '#pop'), (r'\s+', Text), ], 'attr': [ ('".*?"', String, '#pop'), ("'.*?'", String, '#pop'), (r'[^\s>]+', String, '#pop'), ], } class MakoHtmlLexer(DelegatingLexer): name = 'HTML+Mako' aliases = ['html+mako'] def __init__(self, **options): super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, **options) class MakoXmlLexer(DelegatingLexer): name = 'XML+Mako' aliases = ['xml+mako'] def __init__(self, **options): super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, **options) class MakoJavascriptLexer(DelegatingLexer): name = 'JavaScript+Mako' aliases = ['js+mako', 'javascript+mako'] def __init__(self, **options): super(MakoJavascriptLexer, self).__init__(JavascriptLexer, MakoLexer, **options) class MakoCssLexer(DelegatingLexer): name = 'CSS+Mako' aliases = ['css+mako'] def __init__(self, **options): super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, **options) pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted', linenos=True) def syntax_highlight(filename='', language=None): mako_lexer = MakoLexer() if compat.py3k: python_lexer = Python3Lexer() else: python_lexer = PythonLexer() if filename.startswith('memory:') or language == 'mako': return lambda string: highlight(string, mako_lexer, pygments_html_formatter) return lambda string: highlight(string, python_lexer, pygments_html_formatter)
mit
iambibhas/django
django/conf/locale/cs/formats.py
115
1702
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j. E Y' TIME_FORMAT = 'G:i' DATETIME_FORMAT = 'j. E Y G:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'd.m.Y' SHORT_DATETIME_FORMAT = 'd.m.Y G:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d.%m.%Y', '%d.%m.%y', # '05.01.2006', '05.01.06' '%d. %m. %Y', '%d. %m. %y', # '5. 1. 2006', '5. 1. 06' # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006' ) # Kept ISO formats as one is in first position TIME_INPUT_FORMATS = ( '%H:%M:%S', # '04:30:59' '%H.%M', # '04.30' '%H:%M', # '04:30' ) DATETIME_INPUT_FORMATS = ( '%d.%m.%Y %H:%M:%S', # '05.01.2006 04:30:59' '%d.%m.%Y %H:%M:%S.%f', # '05.01.2006 04:30:59.000200' '%d.%m.%Y %H.%M', # '05.01.2006 04.30' '%d.%m.%Y %H:%M', # '05.01.2006 04:30' '%d.%m.%Y', # '05.01.2006' '%d. %m. %Y %H:%M:%S', # '05. 01. 2006 04:30:59' '%d. %m. %Y %H:%M:%S.%f', # '05. 01. 2006 04:30:59.000200' '%d. %m. %Y %H.%M', # '05. 01. 2006 04.30' '%d. %m. %Y %H:%M', # '05. 01. 2006 04:30' '%d. %m. %Y', # '05. 01. 2006' '%Y-%m-%d %H.%M', # '2006-01-05 04.30' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '\xa0' # non-breaking space NUMBER_GROUPING = 3
bsd-3-clause
Gilbert32/leo-3.4
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for # traces drawing and overview. # # Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com> # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. try: import wx except ImportError: raise ImportError, "You need to install the wxpython lib for this script" class RootFrame(wx.Frame): Y_OFFSET = 100 RECT_HEIGHT = 100 RECT_SPACE = 50 EVENT_MARKING_WIDTH = 5 def __init__(self, sched_tracer, title, parent = None, id = -1): wx.Frame.__init__(self, parent, id, title) (self.screen_width, self.screen_height) = wx.GetDisplaySize() self.screen_width -= 10 self.screen_height -= 10 self.zoom = 0.5 self.scroll_scale = 20 self.sched_tracer = sched_tracer self.sched_tracer.set_root_win(self) (self.ts_start, self.ts_end) = sched_tracer.interval() self.update_width_virtual() self.nr_rects = sched_tracer.nr_rectangles() + 1 self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)) # whole window panel self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height)) # scrollable container self.scroll = wx.ScrolledWindow(self.panel) self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale) self.scroll.EnableScrolling(True, True) self.scroll.SetFocus() # scrollable drawing area self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2)) self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint) self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press) self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down) self.scroll.Bind(wx.EVT_PAINT, self.on_paint) self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press) self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down) self.scroll.Fit() self.Fit() self.scroll_panel.SetDimensions(-1, -1, 
self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING) self.txt = None self.Show(True) def us_to_px(self, val): return val / (10 ** 3) * self.zoom def px_to_us(self, val): return (val / self.zoom) * (10 ** 3) def scroll_start(self): (x, y) = self.scroll.GetViewStart() return (x * self.scroll_scale, y * self.scroll_scale) def scroll_start_us(self): (x, y) = self.scroll_start() return self.px_to_us(x) def paint_rectangle_zone(self, nr, color, top_color, start, end): offset_px = self.us_to_px(start - self.ts_start) width_px = self.us_to_px(end - self.ts_start) offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)) width_py = RootFrame.RECT_HEIGHT dc = self.dc if top_color is not None: (r, g, b) = top_color top_color = wx.Colour(r, g, b) brush = wx.Brush(top_color, wx.SOLID) dc.SetBrush(brush) dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH) width_py -= RootFrame.EVENT_MARKING_WIDTH offset_py += RootFrame.EVENT_MARKING_WIDTH (r ,g, b) = color color = wx.Colour(r, g, b) brush = wx.Brush(color, wx.SOLID) dc.SetBrush(brush) dc.DrawRectangle(offset_px, offset_py, width_px, width_py) def update_rectangles(self, dc, start, end): start += self.ts_start end += self.ts_start self.sched_tracer.fill_zone(start, end) def on_paint(self, event): dc = wx.PaintDC(self.scroll_panel) self.dc = dc width = min(self.width_virtual, self.screen_width) (x, y) = self.scroll_start() start = self.px_to_us(x) end = self.px_to_us(x + width) self.update_rectangles(dc, start, end) def rect_from_ypixel(self, y): y -= RootFrame.Y_OFFSET rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE) height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE) if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT: return -1 return rect def update_summary(self, txt): if self.txt: self.txt.Destroy() self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50)) def on_mouse_down(self, event): (x, y) = 
event.GetPositionTuple() rect = self.rect_from_ypixel(y) if rect == -1: return t = self.px_to_us(x) + self.ts_start self.sched_tracer.mouse_down(rect, t) def update_width_virtual(self): self.width_virtual = self.us_to_px(self.ts_end - self.ts_start) def __zoom(self, x): self.update_width_virtual() (xpos, ypos) = self.scroll.GetViewStart() xpos = self.us_to_px(x) / self.scroll_scale self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos) self.Refresh() def zoom_in(self): x = self.scroll_start_us() self.zoom *= 2 self.__zoom(x) def zoom_out(self): x = self.scroll_start_us() self.zoom /= 2 self.__zoom(x) def on_key_press(self, event): key = event.GetRawKeyCode() if key == ord("+"): self.zoom_in() return if key == ord("-"): self.zoom_out() return key = event.GetKeyCode() (x, y) = self.scroll.GetViewStart() if key == wx.WXK_RIGHT: self.scroll.Scroll(x + 1, y) elif key == wx.WXK_LEFT: self.scroll.Scroll(x - 1, y) elif key == wx.WXK_DOWN: self.scroll.Scroll(x, y + 1) elif key == wx.WXK_UP: self.scroll.Scroll(x, y - 1)
gpl-2.0
renyi533/tensorflow
tensorflow/python/keras/optimizer_v2/adadelta_test.py
2
8161
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adadelta Optimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import adadelta from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test _DATA_TYPES = [dtypes.half, dtypes.float32, dtypes.float64] # TODO(b/143684500): Eigen to support complex sqrt if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()): _DATA_TYPES += [dtypes.complex64, dtypes.complex128] class AdadeltaOptimizerTest(test.TestCase): def doTestBasic(self, use_resource=False, use_callable_params=False): num_updates = 4 # number of ADADELTA steps to perform for dtype in _DATA_TYPES: for grad in [0.2, 0.1, 0.01]: for lr in [1.0, 0.5, 0.1]: var0_init = [1.0, 2.0] var1_init = [3.0, 4.0] if use_resource: var0 = resource_variable_ops.ResourceVariable( var0_init, dtype=dtype) 
var1 = resource_variable_ops.ResourceVariable( var1_init, dtype=dtype) else: var0 = variables.Variable(var0_init, dtype=dtype) var1 = variables.Variable(var1_init, dtype=dtype) grads = constant_op.constant([grad, grad], dtype=dtype) accum = 0.0 accum_update = 0.0 # ADADELTA gradient optimizer rho = 0.95 epsilon = 1e-8 if use_callable_params: adadelta_opt = adadelta.Adadelta( learning_rate=lambda: lr, # pylint: disable=cell-var-from-loop rho=lambda: rho, # pylint: disable=cell-var-from-loop epsilon=epsilon) # pylint: disable=cell-var-from-loop else: adadelta_opt = adadelta.Adadelta( learning_rate=lr, rho=rho, epsilon=epsilon) if not context.executing_eagerly(): adadelta_update = adadelta_opt.apply_gradients( zip([grads, grads], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Assign slots slot = [None] * 2 slot_update = [None] * 2 slot[0] = adadelta_opt.get_slot(var0, "accum_grad") self.assertEqual(slot[0].shape, var0.shape) slot_update[0] = adadelta_opt.get_slot(var0, "accum_var") self.assertEqual(slot_update[0].shape, var0.shape) slot[1] = adadelta_opt.get_slot(var1, "accum_grad") self.assertEqual(slot[1].shape, var1.shape) slot_update[1] = adadelta_opt.get_slot(var1, "accum_var") self.assertEqual(slot_update[1].shape, var1.shape) # Fetch params to validate initial values self.assertAllClose(var0_init, self.evaluate(var0)) self.assertAllClose(var1_init, self.evaluate(var1)) update = [None] * num_updates tot_update = 0 for step in range(num_updates): # Run adadelta update for comparison if not context.executing_eagerly(): self.evaluate(adadelta_update) else: adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1])) # Perform initial update without previous accum values accum = accum * rho + (grad**2) * (1 - rho) update[step] = ( np.sqrt(accum_update + epsilon) * (1. 
/ np.sqrt(accum + epsilon)) * grad) accum_update = ( accum_update * rho + (update[step]**2) * (1.0 - rho)) tot_update += update[step] * lr if not context.executing_eagerly(): # Check that the accumulators have been updated # TODO(lxuechen): This is hard to test in eager mode for slot_idx in range(2): self.assertAllCloseAccordingToType( np.array([accum, accum], dtype=dtype.as_numpy_dtype(0)), self.evaluate(slot[slot_idx]), rtol=1e-5) self.assertAllCloseAccordingToType( np.array( [accum_update, accum_update], dtype=dtype.as_numpy_dtype(0)), self.evaluate(slot_update[slot_idx]), rtol=1e-5) # Check that the parameters have been updated self.assertAllCloseAccordingToType( np.array( [var0_init[0] - tot_update, var0_init[1] - tot_update], dtype=dtype.as_numpy_dtype(0)), self.evaluate(var0), rtol=1e-5) self.assertAllCloseAccordingToType( np.array( [var1_init[0] - tot_update, var1_init[1] - tot_update], dtype=dtype.as_numpy_dtype(0)), self.evaluate(var1), rtol=1e-5) @test_util.run_in_graph_and_eager_modes(reset_test=True) def testResourceBasic(self): self.doTestBasic(use_resource=True) def testBasicCallableParams(self): with context.eager_mode(): self.doTestBasic(use_resource=True, use_callable_params=True) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with ops.Graph().as_default(): for dtype in _DATA_TYPES: var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize( loss, var_list=[var0]) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0)) def testConstructAdadeltaWithLR(self): opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.) opt_2 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1., lr=1.0) opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) def testConstructAdadeltaWithEpsilonValues(self): opt = adadelta.Adadelta(epsilon=None) self.assertEqual(opt.epsilon, 1e-7) opt = adadelta.Adadelta(epsilon=1e-8) self.assertEqual(opt.epsilon, 1e-8) if __name__ == "__main__": test.main()
apache-2.0
sammerry/ansible
lib/ansible/executor/playbook_executor.py
24
11332
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import getpass import locale import signal import sys from ansible import constants as C from ansible.errors import * from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook from ansible.plugins import module_loader from ansible.template import Templar from ansible.utils.color import colorize, hostcolor from ansible.utils.debug import debug from ansible.utils.encrypt import do_encrypt from ansible.utils.unicode import to_unicode class PlaybookExecutor: ''' This is the primary class for executing playbooks, and thus the basis for bin/ansible-playbook operation. 
''' def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords): self._playbooks = playbooks self._inventory = inventory self._variable_manager = variable_manager self._loader = loader self._display = display self._options = options self.passwords = passwords # make sure the module path (if specified) is parsed and # added to the module_loader object if options.module_path is not None: for path in options.module_path.split(os.pathsep): module_loader.add_directory(path) if options.listhosts or options.listtasks or options.listtags or options.syntax: self._tqm = None else: self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords) def run(self): ''' Run the given playbook, based on the settings in the play which may limit the runs to serialized groups, etc. ''' signal.signal(signal.SIGINT, self._cleanup) result = 0 entrylist = [] entry = {} try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) self._inventory.set_playbook_basedir(os.path.dirname(playbook_path)) if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} entry['plays'] = [] i = 1 plays = pb.get_plays() self._display.vv('%d plays in %s' % (len(plays), playbook_path)) for play in plays: # clear any filters which may have been applied to the inventory self._inventory.remove_restriction() if play.vars_prompt: for var in play.vars_prompt: if 'name' not in var: raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds) vname = var['name'] prompt = var.get("prompt", vname) default = var.get("default", None) private = var.get("private", True) confirm = var.get("confirm", False) encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) if vname not in play.vars: 
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) # Create a temporary copy of the play here, so we can run post_validate # on it without the templating changes affecting the original object. all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) templar = Templar(loader=self._loader, variables=all_vars) new_play = play.copy() new_play.post_validate(templar) if self._options.syntax: continue if self._tqm is None: # we are just doing a listing entry['plays'].append(new_play) else: # make sure the tqm has callbacks loaded self._tqm.load_callbacks() # we are actually running plays for batch in self._get_serialized_batches(new_play): if len(batch) == 0: self._tqm.send_callback('v2_playbook_on_play_start', new_play) self._tqm.send_callback('v2_playbook_on_no_hosts_matched') break # restrict the inventory to the hosts in the serialized batch self._inventory.restrict_to_hosts(batch) # and run it... result = self._tqm.run(play=play) # if the last result wasn't zero, break out of the serial batch loop if result != 0: break # if the last result wasn't zero, break out of the play loop if result != 0: break i = i + 1 # per play if entry: entrylist.append(entry) # per playbook # if the last result wasn't zero, break out of the playbook file name loop if result != 0: break if entrylist: return entrylist finally: if self._tqm is not None: self._cleanup() if self._options.syntax: self.display.display("No issues encountered") return result # FIXME: this stat summary stuff should be cleaned up and moved # to a new method, if it even belongs here... 
self._display.banner("PLAY RECAP") hosts = sorted(self._tqm._stats.processed.keys()) for h in hosts: t = self._tqm._stats.summarize(h) self._display.display("%s : %s %s %s %s" % ( hostcolor(h, t), colorize('ok', t['ok'], 'green'), colorize('changed', t['changed'], 'yellow'), colorize('unreachable', t['unreachable'], 'red'), colorize('failed', t['failures'], 'red')), screen_only=True ) self._display.display("%s : %s %s %s %s" % ( hostcolor(h, t, False), colorize('ok', t['ok'], None), colorize('changed', t['changed'], None), colorize('unreachable', t['unreachable'], None), colorize('failed', t['failures'], None)), log_only=True ) self._display.display("", screen_only=True) # END STATS STUFF return result def _cleanup(self, signum=None, framenum=None): return self._tqm.cleanup() def _get_serialized_batches(self, play): ''' Returns a list of hosts, subdivided into batches based on the serial size specified in the play. ''' # make sure we have a unique list of hosts all_hosts = self._inventory.get_hosts(play.hosts) # check to see if the serial number was specified as a percentage, # and convert it to an integer value based on the number of hosts if isinstance(play.serial, basestring) and play.serial.endswith('%'): serial_pct = int(play.serial.replace("%","")) serial = int((serial_pct/100.0) * len(all_hosts)) else: serial = int(play.serial) # if the serial count was not specified or is invalid, default to # a list of all hosts, otherwise split the list of hosts into chunks # which are based on the serial size if serial <= 0: return [all_hosts] else: serialized_batches = [] while len(all_hosts) > 0: play_hosts = [] for x in range(serial): if len(all_hosts) > 0: play_hosts.append(all_hosts.pop(0)) serialized_batches.append(play_hosts) return serialized_batches def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): if sys.__stdin__.isatty(): if prompt and default is not None: msg = "%s [%s]: " % 
(prompt, default) elif prompt: msg = "%s: " % prompt else: msg = 'input for %s: ' % varname def do_prompt(prompt, private): if sys.stdout.encoding: msg = prompt.encode(sys.stdout.encoding) else: # when piping the output, or at other times when stdout # may not be the standard file descriptor, the stdout # encoding may not be set, so default to something sane msg = prompt.encode(locale.getpreferredencoding()) if private: return getpass.getpass(msg) return raw_input(msg) if confirm: while True: result = do_prompt(msg, private) second = do_prompt("confirm " + msg, private) if result == second: break self._display.display("***** VALUES ENTERED DO NOT MATCH ****") else: result = do_prompt(msg, private) else: result = None self._display.warning("Not prompting as we are not in interactive mode") # if result is false and default is not None if not result and default is not None: result = default if encrypt: result = do_encrypt(result, encrypt, salt_size, salt) # handle utf-8 chars result = to_unicode(result, errors='strict') return result
gpl-3.0
zozo123/buildbot
master/buildbot/monkeypatches/python14653.py
3
1194
# This file is part of Buildbot.  Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

import calendar
import time


def fixed_mktime_tz(data):
    """Convert a 10-tuple (as from ``email.utils.parsedate_tz``) to a POSIX
    timestamp.

    Drop-in replacement for the buggy ``email.utils.mktime_tz`` of
    Python <= 2.7.3 (http://bugs.python.org/issue14653).  Element 9 of
    *data* is the parsed UTC offset in seconds, or ``None`` when the
    date string carried no zone information.
    """
    zone_offset = data[9]
    if zone_offset is None:
        # No zone info, so localtime is a better assumption than GMT.
        return time.mktime(data[:8] + (-1,))
    # Interpret the tuple as UTC, then back out the parsed zone offset.
    return calendar.timegm(data) - zone_offset


def patch():
    """Monkeypatch ``email.utils.mktime_tz`` with the fixed version.

    Fix for http://bugs.python.org/issue14653 for Python 2.7.3 and below;
    required for http://trac.buildbot.net/ticket/2522.
    """
    import email.utils
    email.utils.mktime_tz = fixed_mktime_tz
gpl-3.0
sameetb-cuelogic/edx-platform-test
common/lib/logsettings.py
39
5740
import os
import platform
import sys
from logging.handlers import SysLogHandler

LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']


def get_logger_config(log_dir,
                      logging_env="no_env",
                      tracking_filename="tracking.log",
                      edx_filename="edx.log",
                      dev_env=False,
                      syslog_addr=None,
                      debug=False,
                      local_loglevel='INFO',
                      console_loglevel=None,
                      service_variant=None):
    """Build the dictConfig-style logging dictionary for this deployment.

    Assign the result to the ``LOGGING`` variable in your settings.  It is
    returned (rather than registered directly) so that repeated calls while
    settings are being extended do not have to reset any logging state.

    When ``dev_env`` is true, application and tracking logs are written as
    rotating files under ``log_dir`` using ``edx_filename`` and
    ``tracking_filename``.  Otherwise both streams go to the local rsyslogd
    via ``/dev/log`` and the filename arguments are ignored.
    """
    # Fall back to sane levels when an invalid level name is passed in.
    if local_loglevel not in LOG_LEVELS:
        local_loglevel = 'INFO'
    if console_loglevel not in LOG_LEVELS:
        console_loglevel = 'DEBUG' if debug else 'INFO'
    if service_variant is None:
        # Blank string => no per-service sub directory in the syslog tag.
        service_variant = ''

    short_hostname = platform.node().split(".")[0]
    syslog_format = ("[service_variant={service_variant}]"
                     "[%(name)s][env:{logging_env}] %(levelname)s "
                     "[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
                     "- %(message)s").format(service_variant=service_variant,
                                             logging_env=logging_env,
                                             hostname=short_hostname)

    root_handlers = ['console', 'local']
    if syslog_addr:
        root_handlers.append('syslogger-remote')

    formatters = {
        'standard': {
            'format': ('%(asctime)s %(levelname)s %(process)d '
                       '[%(name)s] %(filename)s:%(lineno)d - %(message)s'),
        },
        'syslog_format': {'format': syslog_format},
        'raw': {'format': '%(message)s'},
    }

    handlers = {
        'console': {
            'level': console_loglevel,
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
            'stream': sys.stderr,
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'newrelic': {
            'level': 'ERROR',
            'class': 'lms.lib.newrelic_logging.NewRelicHandler',
            'formatter': 'raw',
        },
    }

    loggers = {
        'tracking': {
            'handlers': ['tracking'],
            'level': 'DEBUG',
            'propagate': False,
        },
        '': {
            'handlers': root_handlers,
            'level': 'DEBUG',
            'propagate': False
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }

    if syslog_addr:
        handlers['syslogger-remote'] = {
            'level': 'INFO',
            'class': 'logging.handlers.SysLogHandler',
            'address': syslog_addr,
            'formatter': 'syslog_format',
        }

    if dev_env:
        # Development: drop rotating log files directly into log_dir.
        handlers['local'] = {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': local_loglevel,
            'formatter': 'standard',
            'filename': os.path.join(log_dir, edx_filename),
            'maxBytes': 1024 * 1024 * 2,
            'backupCount': 5,
        }
        handlers['tracking'] = {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(log_dir, tracking_filename),
            'formatter': 'raw',
            'maxBytes': 1024 * 1024 * 2,
            'backupCount': 5,
        }
    else:
        # Production: only log INFO and up, routed through local rsyslogd.
        loggers['']['level'] = 'INFO'
        handlers['local'] = {
            'level': local_loglevel,
            'class': 'logging.handlers.SysLogHandler',
            'address': '/dev/log',
            'formatter': 'syslog_format',
            'facility': SysLogHandler.LOG_LOCAL0,
        }
        handlers['tracking'] = {
            'level': 'DEBUG',
            'class': 'logging.handlers.SysLogHandler',
            'address': '/dev/log',
            'facility': SysLogHandler.LOG_LOCAL1,
            'formatter': 'raw',
        }

    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': formatters,
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse',
            }
        },
        'handlers': handlers,
        'loggers': loggers,
    }
agpl-3.0
wichovw/tca-gt
server/tca/cars.py
1
1570
import uuid
import random


class Car:
    """A vehicle travelling on the simulated traffic grid.

    Recognised keyword arguments: ``id``, ``speed``,
    ``change_lane_intention``, ``street``, ``next_street`` and
    ``probability``.  Anything omitted falls back to the class-level
    defaults below.
    """

    # Class-level defaults; __init__ always gives each instance its own
    # ``probability`` dict (see BUG FIX note there).
    speed = 0
    change_lane_intention = 0
    street = None
    next_street = None
    probability = {}

    def __init__(self, **kwargs):
        """Create a car with a (possibly generated) id and fresh random
        "personality" probabilities.

        BUG FIX: the original bound ``self.probability`` either to the
        shared class-level dict or directly to the caller's dict, then
        mutated it in place — so every Car (and every ``clone()``) ended
        up writing random values into one shared dict, corrupting the
        probabilities of all other cars.  We now always work on a
        private copy.
        """
        self.id = kwargs['id'] if 'id' in kwargs else str(uuid.uuid4())
        if 'speed' in kwargs:
            self.speed = kwargs['speed']
        if 'change_lane_intention' in kwargs:
            self.change_lane_intention = kwargs['change_lane_intention']
        if 'street' in kwargs:
            self.street = kwargs['street']
        if 'next_street' in kwargs:
            self.next_street = kwargs['next_street']
        # Private copy: never alias the class default or the caller's dict.
        self.probability = dict(kwargs.get('probability') or {})
        # Fresh random personality for this instance (original behavior:
        # these three keys are always re-rolled, even for clones).
        self.probability['random_slow_p'] = random.random()
        self.probability['change_lane_p'] = random.random()
        self.probability['turn_street_p'] = random.random()

    def clone(self):
        """Return a copy of this car (same id/speed/streets).

        Note: the clone's personality probabilities are re-randomized by
        ``__init__``; any *other* keys in ``probability`` are carried over.
        """
        return Car(id=self.id,
                   speed=self.speed,
                   change_lane_intention=self.change_lane_intention,
                   street=self.street,
                   next_street=self.next_street,
                   probability=self.probability)

    def get_personality_color(self):
        """Return a representative color based on personality.

        NOTE(review): currently a uniformly random ``#RRGGBB`` string each
        call — it does not actually derive from the personality values.

        :return: hex color
        """
        rand_channel = lambda: random.randint(0, 255)
        return '#%02X%02X%02X' % (rand_channel(), rand_channel(), rand_channel())

    def __repr__(self):
        # Short id prefix keeps simulation dumps readable.
        return "<Car [%s] s:%d>" % (self.id[:4], self.speed)
mit
zincumyx/Mammoth
mammoth-src/build/contrib/hod/testing/helper.py
182
1122
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements.  See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership.  The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License.  You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

"""Tiny subprocess helper for the HOD test suite: selects an output mode
from argv[1] and emits ``sampleText`` on the matching stream."""
import sys

# Text emitted by the helper modes below; tests import this to compare output.
sampleText = "Hello World!"

if __name__ == "__main__":
    args = sys.argv[1:]
    if args[0] == "1":
        # print sample text to stdout
        # (FIX: the old comment wrongly said "stderr" for this branch)
        sys.stdout.write(sampleText)
    elif args[0] == "2":
        # print sample text to stderr
        sys.stderr.write(sampleText)
    # Add any other helper programs here, with different values for args[0]
    pass
apache-2.0
Ernesto99/odoo
addons/l10n_es/__openerp__.py
314
2772
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2008-2010 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved. # Jordi Esteve <jesteve@zikzakmedia.com> # Copyright (c) 2012-2013, Grupo OPENTIA (<http://opentia.com>) Registered EU Trademark. # Dpto. Consultoría <consultoria@opentia.es> # Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com) # Pedro Manuel Baeza <pedro.baeza@serviciosbaeza.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { "name" : "Spanish Charts of Accounts (PGCE 2008)", "version" : "4.0", "author" : "Spanish Localization Team", 'website' : 'https://launchpad.net/openerp-spain', "category" : "Localization/Account Charts", "description": """ Spanish charts of accounts (PGCE 2008). 
======================================== * Defines the following chart of account templates: * Spanish general chart of accounts 2008 * Spanish general chart of accounts 2008 for small and medium companies * Spanish general chart of accounts 2008 for associations * Defines templates for sale and purchase VAT * Defines tax code templates * Defines fiscal positions for spanish fiscal legislation """, "license" : "AGPL-3", "depends" : ["account", "base_vat", "base_iban"], "data" : [ "account_type.xml", "account_chart_template.xml", "account_account_common.xml", "account_account_full.xml", "account_account_pymes.xml", "account_account_assoc.xml", "tax_codes_common.xml", "taxes_common.xml", "fiscal_templates_common.xml", "account_chart_template_post.xml", "l10n_es_wizard.xml", ], "demo" : [], 'auto_install': False, "installable": True, 'images': ['images/config_chart_l10n_es.png', 'images/l10n_es_chart.png'], }
agpl-3.0
brianinnes/pycupi
python/vPiP/generators/concircle.py
2
3534
# Copyright 2016 Mark Benson, portions Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
import math

# How do we generate concentric circles?
# We generate n circles of varying size; the higher the density, the more
# circles, so the biggest circle has radius == density and we iterate down
# to zero.  Circle spacing is fixed at `spacing` pixels for now (it could
# later be derived from pen size / density / max pixel size).


def generateConcentricCircle(positionX, positionY, maxSize, density):
    """Yield integer (x, y) pixel coordinates tracing concentric circles
    centred on (positionX, positionY).

    Rings are emitted from the outermost (radius just below ``density``,
    in steps of ``spacing``) inwards.  Each ring is sampled at one-degree
    steps; consecutive duplicate pixels (common on small radii) are
    suppressed.

    ``maxSize`` is accepted for interface compatibility but is unused.

    FIXES vs. the original:
    * the ``points`` list accumulated every pixel ever yielded while only
      the last entry was ever read — replaced with a single ``last`` tuple
      (O(1) instead of O(n) memory);
    * the bare ``except:`` wrapper is gone — inside a generator it also
      caught ``GeneratorExit``, printing a spurious traceback whenever the
      consumer closed the generator early, and it silently swallowed real
      errors.
    """
    spacing = 3  # pixel gap between successive concentric rings
    numberOfCircles = int(int(density) / spacing)
    last = None  # previously yielded pixel, for consecutive-duplicate removal
    for circleNumber in range(numberOfCircles, 0, -1):
        radius = circleNumber * spacing
        for angle in range(0, 360):
            # Polar -> cartesian, truncated to whole pixels.
            x = int(math.cos(math.radians(angle)) * radius) + positionX
            y = int(math.sin(math.radians(angle)) * radius) + positionY
            point = (x, y)
            if point != last:
                last = point
                yield point
apache-2.0
daenamkim/ansible
lib/ansible/modules/network/illumos/ipadm_ifprop.py
33
8192
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: ipadm_ifprop
short_description: Manage IP interface properties on Solaris/illumos systems.
description:
    - Modify IP interface properties on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
    interface:
        description:
            - Specifies the IP interface we want to manage.
        required: true
        aliases: [nic]
    protocol:
        description:
            - Specifies the protocol for which we want to manage properties.
        required: true
    property:
        description:
            - Specifies the name of the property we want to manage.
        required: true
        aliases: [name]
    value:
        description:
            - Specifies the value we want to set for the property.
        required: false
    temporary:
        description:
            - Specifies that the property value is temporary. Temporary
              property values do not persist across reboots.
        required: false
        default: false
    state:
        description:
            - Set or reset the property value.
        required: false
        default: present
        choices: [ "present", "absent", "reset" ]
'''

EXAMPLES = '''
- name: Allow forwarding of IPv4 packets on network interface e1000g0
  ipadm_ifprop: protocol=ipv4 property=forwarding value=on interface=e1000g0

- name: Temporarily reset IPv4 forwarding property on network interface e1000g0
  ipadm_ifprop: protocol=ipv4 interface=e1000g0 temporary=true property=forwarding state=reset

- name: Configure IPv6 metric on network interface e1000g0
  ipadm_ifprop: protocol=ipv6 nic=e1000g0 name=metric value=100

- name: Set IPv6 MTU on network interface bge0
  ipadm_ifprop: interface=bge0 name=mtu value=1280 protocol=ipv6
'''

RETURN = '''
protocol:
    description: property's protocol
    returned: always
    type: str
    sample: ipv4
property:
    description: property's name
    returned: always
    type: str
    sample: mtu
interface:
    description: interface name we want to set property on
    returned: always
    type: str
    sample: e1000g0
state:
    description: state of the target
    returned: always
    type: string
    sample: present
value:
    description: property's value
    returned: when value is provided
    type: str
    sample: 1280
'''

from ansible.module_utils.basic import AnsibleModule

SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6']


class IfProp(object):
    """Thin wrapper around the illumos ipadm(1M) utility for querying
    and changing IP interface properties."""

    def __init__(self, module):
        self.module = module
        self.interface = module.params['interface']
        self.protocol = module.params['protocol']
        self.property = module.params['property']
        self.value = module.params['value']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

    def _show_ifprop(self, extra_args=None):
        """Run 'ipadm show-ifprop' for this property/protocol/interface.

        extra_args, if given, are inserted right after the subcommand
        (e.g. parseable-output field selectors).  Returns the
        (rc, stdout, stderr) triple from run_command().
        """
        cmd = [self.module.get_bin_path('ipadm'), 'show-ifprop']
        if extra_args:
            cmd.extend(extra_args)
        cmd.extend(['-p', self.property,
                    '-m', self.protocol,
                    self.interface])
        return self.module.run_command(cmd)

    def property_exists(self):
        """Return True when the property is known to ipadm.

        An unknown property aborts the module with fail_json instead of
        returning False, so callers may assume a valid property afterwards.
        """
        (rc, _, _) = self._show_ifprop()
        if rc == 0:
            return True

        self.module.fail_json(msg='Unknown %s property "%s" on IP interface %s' %
                              (self.protocol, self.property, self.interface),
                              protocol=self.protocol,
                              property=self.property,
                              interface=self.interface)

    def property_is_modified(self):
        """Return True when the current value equals the default value.

        NOTE(review): despite the name, a True result means the property is
        currently *unmodified* (current == default).  main() relies on this
        inverted meaning ('if not property_is_modified(): reset'), so the
        semantics are kept for compatibility.
        """
        (rc, out, _) = self._show_ifprop(['-c', '-o', 'current,default'])
        if rc != 0:
            # Query failed: do not try to parse the (possibly empty) output.
            # The original code split the output before checking rc, which
            # could raise on tuple unpacking.
            return False

        (value, default) = out.rstrip().split(':')
        return value == default

    def property_is_set(self):
        """Return True when the current value matches the requested value."""
        (rc, out, _) = self._show_ifprop(['-c', '-o', 'current'])
        return rc == 0 and self.value == out.rstrip()

    def set_property(self):
        """Set the property to self.value; return (rc, stdout, stderr)."""
        cmd = [self.module.get_bin_path('ipadm'), 'set-ifprop']
        if self.temporary:
            # -t: the change does not persist across reboots.
            cmd.append('-t')
        cmd.extend(['-p', self.property + "=" + self.value,
                    '-m', self.protocol,
                    self.interface])
        return self.module.run_command(cmd)

    def reset_property(self):
        """Reset the property to its default; return (rc, stdout, stderr)."""
        cmd = [self.module.get_bin_path('ipadm'), 'reset-ifprop']
        if self.temporary:
            cmd.append('-t')
        cmd.extend(['-p', self.property,
                    '-m', self.protocol,
                    self.interface])
        return self.module.run_command(cmd)


def main():
    """Module entry point: parse params and apply the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS),
            property=dict(required=True, aliases=['name']),
            value=dict(required=False),
            temporary=dict(default=False, type='bool'),
            # 'default=None' removed: a default on a required parameter is
            # meaningless and triggers an Ansible sanity warning.
            interface=dict(required=True, aliases=['nic']),
            state=dict(
                default='present', choices=['absent', 'present', 'reset']),
        ),
        supports_check_mode=True
    )

    ifprop = IfProp(module)

    rc = None
    out = ''
    err = ''
    result = {}
    result['protocol'] = ifprop.protocol
    result['property'] = ifprop.property
    result['interface'] = ifprop.interface
    result['state'] = ifprop.state
    if ifprop.value:
        result['value'] = ifprop.value

    if ifprop.state == 'absent' or ifprop.state == 'reset':
        if ifprop.property_exists():
            # Only reset when the current value differs from the default
            # (property_is_modified() is True when current == default).
            if not ifprop.property_is_modified():
                if module.check_mode:
                    module.exit_json(changed=True)
                (rc, out, err) = ifprop.reset_property()
                if rc != 0:
                    module.fail_json(protocol=ifprop.protocol,
                                     property=ifprop.property,
                                     interface=ifprop.interface,
                                     msg=err,
                                     rc=rc)
    elif ifprop.state == 'present':
        if ifprop.value is None:
            module.fail_json(msg='Value is mandatory with state "present"')

        if ifprop.property_exists():
            # Only touch the system when the value actually differs.
            if not ifprop.property_is_set():
                if module.check_mode:
                    module.exit_json(changed=True)
                (rc, out, err) = ifprop.set_property()
                if rc != 0:
                    module.fail_json(protocol=ifprop.protocol,
                                     property=ifprop.property,
                                     interface=ifprop.interface,
                                     msg=err,
                                     rc=rc)

    # rc stays None when no command was executed, i.e. nothing changed.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
MrSurly/micropython
tests/extmod/ubinascii_a2b_base64.py
14
1248
# Test for a2b_base64() (base64 decoding) in the (u)binascii module.
# The same script must run under both the MicroPython 'ubinascii' module
# and CPython's 'binascii' — presumably the printed output is compared
# between the two ports (MicroPython test convention; TODO confirm).
try:
    try:
        # MicroPython name for the module.
        import ubinascii as binascii
    except ImportError:
        # Fall back to the CPython standard-library module.
        import binascii
except ImportError:
    # Neither module is available on this port: skip the test.
    print("SKIP")
    raise SystemExit

# Valid inputs: the classic "foobar" vectors covering every padding length,
# plus low control bytes and high-bit bytes (0x7f, 0x80, 0xff).
print(binascii.a2b_base64(b""))
print(binascii.a2b_base64(b"Zg=="))
print(binascii.a2b_base64(b"Zm8="))
print(binascii.a2b_base64(b"Zm9v"))
print(binascii.a2b_base64(b"Zm9vYg=="))
print(binascii.a2b_base64(b"Zm9vYmE="))
print(binascii.a2b_base64(b"Zm9vYmFy"))
print(binascii.a2b_base64(b"AAECAwQFBgc="))
print(binascii.a2b_base64(b"CAkKCwwNDg8="))
print(binascii.a2b_base64(b"f4D/"))
print(binascii.a2b_base64(b"f4D+"))  # convert '+'
print(binascii.a2b_base64(b"MTIzNEFCQ0RhYmNk"))

# Ignore invalid characters and pad sequences
print(binascii.a2b_base64(b"Zm9v\n"))
print(binascii.a2b_base64(b"Zm\x009v\n"))
print(binascii.a2b_base64(b"Zm9v=="))
print(binascii.a2b_base64(b"Zm9v==="))
print(binascii.a2b_base64(b"Zm9v===YmFy"))

# Malformed inputs that must raise ValueError: a dangling 3-character
# group, bad padding position, and characters outside the alphabet.
try:
    print(binascii.a2b_base64(b"abc"))
except ValueError:
    print("ValueError")
try:
    print(binascii.a2b_base64(b"abcde="))
except ValueError:
    print("ValueError")
try:
    print(binascii.a2b_base64(b"ab*d"))
except ValueError:
    print("ValueError")
try:
    print(binascii.a2b_base64(b"ab=cdef="))
except ValueError:
    print("ValueError")
mit
Gebesa-Dev/Addons-gebesa
account_invoice_stock_picking_id/models/account_invoice.py
1
18261
# -*- coding: utf-8 -*- # © <YEAR(S)> <AUTHOR(S)> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from openerp import _, api, fields, models from openerp.tools import DEFAULT_SERVER_DATE_FORMAT from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from openerp.exceptions import UserError import pytz class AccountInvoice(models.Model): _name = 'account.invoice' _inherit = 'account.invoice' picking_id = fields.Many2one('stock.picking', ondelete='restrict', string=_("Related Picking"), index=True, readonly=True) picking_ids2 = fields.Many2many( 'stock.picking', 'acc_invoice_picking_rel', 'invoice_id', 'picking_id', ondelete='restrict', string=_("Related Picking"), index=True, readonly=True) @api.multi def action_move_create(self): res = super(AccountInvoice, self).action_move_create() for inv in self: if inv.type in ['out_invoice']: if not inv.sale_id and not inv.picking_ids2: if not inv.prepayment_ok: inv._create_pickings_and_procurements(None) elif inv.type in ['in_invoice']: lines_ids = inv.mapped('invoice_line_ids').filtered( lambda r: not r.purchase_line_id and r.product_id.type in ('product', 'consu')) if lines_ids and not inv.picking_ids2: if not inv.prepayment_ok: inv._create_pickings_and_procurements_in_invoice() return res def _create_pickings_and_procurements_in_invoice(self, picking_id=False): move_obj = self.env['stock.move'] picking_obj = self.env['stock.picking'] lines_ids = self.mapped('invoice_line_ids').filtered( lambda r: not r.purchase_line_id and r.product_id.type in ('product', 'consu')) for line in lines_ids: date_planned = self._get_date_planned(line) if not picking_id: picking_id = picking_obj.create( self._prepare_order_picking(line, 'in')) for move in self._prepare_order_line_move( line, picking_id, date_planned, 'in'): move_obj.create(move) if picking_id: for picking in picking_id: picking.action_confirm() for move in 
picking.move_lines: move.force_assign() for pack in picking.pack_operation_ids: if pack.product_qty > 0: pack.write({'qty_done': pack.product_qty}) else: pack.unlink() picking.do_transfer() if picking_id: self.picking_ids2 = picking_id return True def _create_pickings_and_procurements(self, picking_id=False): move_obj = self.env['stock.move'] picking_obj = self.env['stock.picking'] # procurement_obj = self.env['procurement.order'] # proc_ids = [] generate = False if 'product' in self.invoice_line_ids.mapped( 'product_id').mapped('type'): generate = True if generate: pickings = [] for line in self.invoice_line_ids: if not line.account_analytic_id: raise UserError(_('The %s product line does not have an \ analytical account') % line.product_id.default_code) if not line.account_analytic_id.warehouse_id: raise UserError(_('The %s analytical account does not \ have an assigned warehouse') % line.account_analytic_id.name) warehouses = self.invoice_line_ids.mapped( 'account_analytic_id').mapped('warehouse_id') for warehouse in warehouses: picking_id = False lines = self.invoice_line_ids.filtered( lambda r: r.account_analytic_id.warehouse_id.id == warehouse.id) for line in lines: date_planned = self._get_date_planned(line) if line.product_id: if line.product_id.type in ('product', 'consu'): if not picking_id: picking_id = picking_obj.create( self._prepare_order_picking(line, 'out')) pickings.append(picking_id.id) # move_id = move_obj.create( for move in self._prepare_order_line_move( line, picking_id, date_planned, 'out'): move_obj.create(move) # else: # move_id = False # proc_id = procurement_obj.create( # self._prepare_order_line_procurement( # line, move_id, date_planned)) # proc_ids.append(proc_id) # line.procurement_id = proc_id # self.ship_recreate(line, move_id, proc_id) if picking_id: for picking in picking_id: picking.action_confirm() for move in picking.move_lines: move.force_assign() for pack in picking.pack_operation_ids: if pack.product_qty > 0: 
pack.write({'qty_done': pack.product_qty}) else: pack.unlink() picking.do_transfer() # for proc_id in proc_ids: # proc_id.run() if picking_id: self.picking_ids2 = pickings return True def _get_date_planned(self, line): start_date = self.date_to_datetime(self.date_invoice) date_planned = datetime.strptime( start_date, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta( days=0.0) date_planned = (date_planned - timedelta( days=self.company_id.security_lead) ).strftime(DEFAULT_SERVER_DATETIME_FORMAT) return date_planned def _prepare_order_picking(self, line, type_move): # picking_name = self.env['ir.sequence'].get('stock.picking') move_type_obj = self.env['stock.move.type'] location_obj = self.env['stock.location'] if type_move == 'out': move_type_id = move_type_obj.search([('code', '=', 'S1')]) or False account_analytic = line.account_analytic_id warehouse_id = account_analytic.warehouse_id location = location_obj.search([ ('stock_warehouse_id', '=', warehouse_id.id), ('type_stock_loc', '=', 'fp')]) location_dest = self.partner_id.property_stock_customer if not location_dest: raise UserError(_( 'El cliente %s no tiene asignada una ubicacion de cliente') % self.partner_id.name) partner = self.partner_shipping_id.id picking_type = warehouse_id.out_type_id.id else: move_type_id = move_type_obj.search([('code', '=', 'E2')]) or False account_analytic = self.account_analytic_id warehouse_id = account_analytic.warehouse_id location = self.partner_id.property_stock_supplier if not location: raise UserError(_( 'El proveedor %s no tiene asignada una ubicacion de proveedor') % self.partner_id.name) if self.company_id.is_manufacturer: location_dest = location_obj.search([ ('stock_warehouse_id', '=', warehouse_id.id), ('type_stock_loc', '=', 'rm')]) else: location_dest = location_obj.search([ ('stock_warehouse_id', '=', warehouse_id.id), ('type_stock_loc', '=', 'fp')]) partner = self.partner_id.id picking_type = warehouse_id.in_type_id.id return { # 'name': picking_name, 'origin': 
self.name, 'date': self.date_to_datetime(self.date_invoice), 'type': type_move, 'state': 'waiting', 'move_type': 'direct', 'invoice_id': self.id, 'partner_id': partner, 'note': self.comment, 'account_analytic_id': account_analytic.id, 'invoice_state': 'invoiced', 'company_id': self.company_id.id, 'stock_move_type_id': move_type_id[0].id, 'location_id': location.id, 'location_dest_id': location_dest.id, 'picking_type_id': picking_type } def date_to_datetime(self, userdate): user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT) if self._context and self._context.get('tz'): tz_name = self._context['tz'] else: tz_name = self.env['res.users'].browse(self._uid).read( ['tz'])[0]['tz'] if tz_name: utc = pytz.timezone('UTC') context_tz = pytz.timezone(tz_name) user_datetime = user_date + relativedelta(hours=12.0) local_timestamp = context_tz.localize(user_datetime, is_dst=False) user_datetime = local_timestamp.astimezone(utc) return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT) return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT) # def _prepare_order_line_procurement(self, line, move_id, date_planned): # warehouse_id = self.account_analytic_id.warehouse_id # return{ # 'name': line.name[:50], # 'origin': self.name, # 'date_planned': date_planned, # 'product_id': line.product_id.id, # 'product_qty': line.quantity, # 'product_uom': line.product_id.uom_id.id, # 'product_uos_qty': line.product_id.uom_id.id, # 'product_uos': line.product_id.uom_id.id, # 'location_id': warehouse_id.wh_output_stock_loc_id.id, # 'move_dest_id': move_id, # 'company_id': self.company_id.id, # 'note': line.name, # } # def ship_recreate(self, line, move_id, proc_id): # move_obj = self.env['stock.move'] # proc_obj = self.env['procurement.order'] # if move_id and self.state == 'shipping_except': # cur_mov = move_obj.browse(move_id) # moves = [] # for pick in self.picking_ids: # if pick.id != cur_mov.picking_id.id and pick.state != 'cancel': # moves.extend( # move for move in 
pick.move_lines if move.state != # 'cancel' and move.invoice_line_id.id == line.id) # if moves: # product_qty = cur_mov.product_qty # product_uos_qty = cur_mov.product_uos_qty # for move in moves: # product_qty -= move.product_qty # product_uos_qty -= move.product_uos_qty # if product_qty > 0 or product_uos_qty > 0: # move_id.product_qty = product_qty # move_id.product_uos_qty = product_uos_qty # proc_id.product_qty = product_qty # proc_id.product_uos_qty = product_uos_qty # else: # cur_mov.unlink() # proc_obj.unlink([proc_id]) # return True def _prepare_order_line_move(self, line, picking_id, date_planned, type_move): location_obj = self.env['stock.location'] move_type_obj = self.env['stock.move.type'] product_obj = self.env['product.product'] price_unit = False is_manufacturer = self.company_id.is_manufacturer if type_move == 'out': move_type_id = move_type_obj.search([('code', '=', 'S1')]) or False account_analytic = line.account_analytic_id warehouse_id = account_analytic.warehouse_id location = location_obj.search([ ('stock_warehouse_id', '=', warehouse_id.id), ('type_stock_loc', '=', 'fp')]) location_dest = self.partner_id.property_stock_customer partner = self.partner_shipping_id.id # picking_type = warehouse_id.out_type_id.id else: move_type_id = move_type_obj.search([('code', '=', 'E2')]) or False account_analytic = self.account_analytic_id warehouse_id = account_analytic.warehouse_id location = self.partner_id.property_stock_supplier if is_manufacturer: location_dest = location_obj.search([ ('stock_warehouse_id', '=', warehouse_id.id), ('type_stock_loc', '=', 'rm')]) else: location_dest = location_obj.search([ ('stock_warehouse_id', '=', warehouse_id.id), ('type_stock_loc', '=', 'fp')]) partner = self.partner_id.id price_unit = line.price_unit if line.invoice_line_tax_ids: price_unit = line.invoice_line_tax_ids.with_context( round=False).compute_all( price_unit, currency=line.invoice_id.currency_id, quantity=1.0)['total_excluded'] if line.uom_id.id != 
line.product_id.uom_id.id: price_unit *= line.uom_id.factor / line.product_id.uom_id.factor if line.invoice_id.currency_id != line.invoice_id.company_id.currency_id: price_unit = line.invoice_id.currency_id.compute( price_unit, line.invoice_id.company_id.currency_id, round=False) # picking_type = warehouse_id.in_type_id.id # warehouse_id = line.account_analytic_id.warehouse_id # location_id = location_obj.search([ # ('stock_warehouse_id', '=', warehouse_id.id), # ('type_stock_loc', '=', 'fp')]) # output_id = self.partner_id.property_stock_customer.id # move_type_obj = self.env['stock.move.type'] # move_type_id = move_type_obj.search([('code', '=', 'S1')]) or False self._cr.execute( """ WITH RECURSIVE bom_detail(id_product, code, qty, id_bom, phantom, lv) AS( SELECT pp.id, pp.default_code, CAST(1.000000 AS numeric), mb.id, CASE WHEN mb.type = 'phantom' AND %s = 'out' AND %s IS TRUE THEN TRUE ELSE FALSE END, 1 FROM product_product AS pp LEFT JOIN mrp_bom AS mb ON pp.id = mb.product_id WHERE pp.id = %s UNION SELECT pp.id, pp.default_code, ROUND(bd.qty * mbl.product_qty, 6), mb.id, CASE WHEN mb.type = 'phantom' THEN TRUE ELSE FALSE END, bd.lv + 1 FROM mrp_bom_line AS mbl JOIN bom_detail AS bd ON mbl.bom_id = bd.id_bom JOIN product_product AS pp ON mbl.product_id = pp.id LEFT JOIN mrp_bom AS mb ON pp.id = mb.product_id WHERE bd.phantom AND %s = 'out' AND %s IS TRUE ) SELECT * FROM bom_detail WHERE phantom IS FALSE""", ( [type_move, is_manufacturer, line.product_id.id, type_move, is_manufacturer])) res = [] if self._cr.rowcount: products = self._cr.fetchall() for prod in products: product = product_obj.browse([prod[0]]) if not price_unit: price_unit = product.standard_price or 0.0 move_dict = { 'name': line.name[:50], 'picking_id': picking_id.id, 'product_id': product.id, 'date': date_planned, 'date_expected': date_planned, 'product_uom_qty': line.quantity * prod[2], 'product_uom': product.uom_id.id, 'product_uos_qty': product.uom_id.id, 'product_uos': product.uom_id.id, 
'product_packaging': False, 'partner_id': partner, 'location_id': location.id, 'location_dest_id': location_dest.id, 'invoice_line_id': line.id, 'tracking_id': False, 'company_id': self.company_id.id, 'price_unit': price_unit, 'stock_move_type_id': move_type_id[0].id, } res.append(move_dict) return res @api.multi def cancel_picking(self): invoice_obj = self.env['account.invoice'] for invoice in self: if not invoice.picking_id: raise UserError(_('This invoice not picking')) invoices = invoice_obj.search( [('picking_id', '=', invoice.picking_id.id), ('state', '!=', 'cancel')]) if invoices: raise UserError(_('Facturas vivas')) moves = [move for move in invoice.picking_id.move_lines] for move in moves: if move.acc_move_id: move.acc_move_id.write({'state': 'draft'}) move.acc_move_id.unlink() move.write({'state': 'cancel'}) invoice.picking_id.write({'state': 'cancel'})
agpl-3.0
inares/edx-platform
lms/djangoapps/shoppingcart/admin.py
63
5399
"""Django admin interface for the shopping cart models. """
from ratelimitbackend import admin

from shoppingcart.models import (
    PaidCourseRegistrationAnnotation, Coupon, DonationConfiguration,
    Invoice, CourseRegistrationCodeInvoiceItem, InvoiceTransaction
)


class SoftDeleteCouponAdmin(admin.ModelAdmin):
    """
    Admin for the Coupon table.
    soft-delete on the coupons
    """
    fields = ('code', 'description', 'course_id', 'percentage_discount',
              'created_by', 'created_at', 'is_active')
    raw_id_fields = ("created_by",)
    readonly_fields = ('created_at',)
    actions = ['really_delete_selected']

    def queryset(self, request):
        """
        Returns a QuerySet of all model instances that can be edited by the
        admin site. This is used by changelist_view.
        """
        # The default admin behaviour would be
        #   self.model._default_manager.get_queryset()
        # which also includes the soft-deleted coupons; restrict the
        # changelist to the active ones only.
        return self.model._default_manager.get_active_coupons_queryset()  # pylint: disable=protected-access

    def get_actions(self, request):
        """Remove Django's hard-delete bulk action; only soft delete is allowed."""
        actions = super(SoftDeleteCouponAdmin, self).get_actions(request)
        del actions['delete_selected']
        return actions

    def really_delete_selected(self, request, queryset):
        """override the default behavior of selected delete method"""
        # Soft delete: flag the coupons inactive instead of removing rows.
        count = queryset.count()
        for obj in queryset:
            obj.is_active = False
            obj.save()

        if count == 1:
            message_bit = "1 coupon entry was"
        else:
            message_bit = "%s coupon entries were" % count
        self.message_user(request, "%s successfully deleted." % message_bit)

    def delete_model(self, request, obj):
        """override the default behavior of single instance of model delete method"""
        obj.is_active = False
        obj.save()

    # Fixed garbled action label ("Delete s selected entries").
    really_delete_selected.short_description = "Really delete the selected entries"


class CourseRegistrationCodeInvoiceItemInline(admin.StackedInline):
    """Admin for course registration code invoice items.

    Displayed inline within the invoice admin UI.
    """
    model = CourseRegistrationCodeInvoiceItem
    extra = 0
    can_delete = False
    readonly_fields = (
        'qty',
        'unit_price',
        'currency',
        'course_id',
    )

    def has_add_permission(self, request):
        """Invoice items are created elsewhere; never added from this admin."""
        return False


class InvoiceTransactionInline(admin.StackedInline):
    """Admin for invoice transactions.

    Displayed inline within the invoice admin UI.
    """
    model = InvoiceTransaction
    extra = 0
    readonly_fields = (
        'created',
        'modified',
        'created_by',
        'last_modified_by'
    )


class InvoiceAdmin(admin.ModelAdmin):
    """Admin for invoices.

    This is intended for the internal finance team to be able to view and
    update invoice information, including payments and refunds.
    """
    date_hierarchy = 'created'
    can_delete = False

    search_fields = (
        'internal_reference',
        'customer_reference_number',
        'company_name',
    )

    fieldsets = (
        (
            None,
            {
                'fields': (
                    'internal_reference',
                    'customer_reference_number',
                    'created',
                    'modified',
                )
            }
        ),
        (
            'Billing Information',
            {
                'fields': (
                    'company_name',
                    'company_contact_name',
                    'company_contact_email',
                    'recipient_name',
                    'recipient_email',
                    'address_line_1',
                    'address_line_2',
                    'address_line_3',
                    'city',
                    'state',
                    'zip',
                    'country'
                )
            }
        )
    )

    # This attribute was previously assigned twice (first as just
    # ('created', 'modified')); the shadowed dead assignment was removed
    # and only the complete tuple is kept.  Every field is read-only:
    # invoices are viewed here, not edited field-by-field.
    readonly_fields = (
        'internal_reference',
        'customer_reference_number',
        'created',
        'modified',
        'company_name',
        'company_contact_name',
        'company_contact_email',
        'recipient_name',
        'recipient_email',
        'address_line_1',
        'address_line_2',
        'address_line_3',
        'city',
        'state',
        'zip',
        'country'
    )

    inlines = [
        CourseRegistrationCodeInvoiceItemInline,
        InvoiceTransactionInline
    ]

    def save_formset(self, request, form, formset, change):
        """Save the user who created and modified invoice transactions. """
        instances = formset.save(commit=False)
        for instance in instances:
            if isinstance(instance, InvoiceTransaction):
                # Only stamp the creator on brand-new transactions.
                if not hasattr(instance, 'created_by'):
                    instance.created_by = request.user
                instance.last_modified_by = request.user
            instance.save()

    def has_add_permission(self, request):
        return False

    def has_delete_permission(self, request, obj=None):
        return False


admin.site.register(PaidCourseRegistrationAnnotation)
admin.site.register(Coupon, SoftDeleteCouponAdmin)
admin.site.register(DonationConfiguration)
admin.site.register(Invoice, InvoiceAdmin)
agpl-3.0
DennisDenuto/puppet-commonscripts
files/aws_cli/AWS-ElasticBeanstalk-CLI-2.6.3/eb/linux/python3/lib/elasticbeanstalk/request.py
8
11961
#!/usr/bin/env python #============================================================================== # Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Amazon Software License (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/asl/ # # or in the "license" file accompanying this file. This file is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or # implied. See the License for the specific language governing permissions # and limitations under the License. #============================================================================== from lib.utility import misc class TemplateSnippet(object): def __init__(self, snippet_name = None, source_url = None, order = None): self._snippet_name = snippet_name self._source_url = source_url self._order = order @property def snippet_name(self): return self._snippet_name @property def source_url(self): return self._source_url @property def order(self): return self._order @snippet_name.setter def snippet_name(self, snippet_name): self._snippet_name = snippet_name @source_url.setter def source_url(self, source_url): self._source_url = source_url @order.setter def order(self, order): self._order = order class Version(object): def __init__(self, application_name = None, version_label = None): self._application_name = application_name self._version_label = version_label @property def application_name(self): return self._application_name @property def version_label(self): return self._version_label @application_name.setter def application_name(self, application_name): self._application_name = application_name @version_label.setter def version_label(self, version_label): self._version_label = version_label class TemplateSpecification(object): def __init__(self, template_source = None, template_snippets = None): if template_source is None: 
self._template_source = TemplateSource() else: self._template_source = template_source if template_snippets is None: self._template_snippets = list() else: self._template_snippets = template_snippets @property def template_source(self): return self._template_source @property def template_snippets(self): return self._template_snippets @template_source.setter def template_source(self, template_source): self._template_source = template_source @template_snippets.setter def template_snippets(self, snippets): self._template_snippets = snippets class TemplateSource(object): def __init__(self, solution_stack_name = None): self._solution_stack_name = solution_stack_name @property def solution_stack_name(self): return self._solution_stack_name @solution_stack_name.setter def solution_stack_name(self, solution_stack_name): self._solution_stack_name = solution_stack_name class Request(object): ''' Convert and store EB request parameters ''' def __init__(self): self._request = dict() def _set_to_list(self, name_set): name_list = [] if isinstance(name_set, set): for name in name_set: name_list.append(str(name)) else: name_list.append(str(name_set)) return name_list def _check_boolean(self, switch): if isinstance(switch, bool): if switch: return 'true' else: return 'false' else: return switch def __repr__(self): try: text = 'Request API: {0}. \nParameters: [\n'.format(self._request['Operation']) except: text = 'Parameters:[\n' for key,value in self._request.items(): text = text + ' {0} : {1}\n'.format(key, value) text = text + ']' return text def get_dict(self): return self._request def set_operation(self, name): self._request['Operation'] = misc.to_unicode(name) def set_app_name(self, name): self._request['ApplicationName'] = misc.to_unicode(name) def set_app_names(self, name_set): name_list = self._set_to_list(name_set) for i in range(len(name_list)): self._request['ApplicationNames.member.' 
+ misc.to_unicode(i + 1)] \ = misc.to_unicode(name_list[i]) def set_version_label(self, name): self._request['VersionLabel'] = misc.to_unicode(name) def set_description(self, description): self._request['Description'] = misc.to_unicode(description) def set_s3bucket(self, bucket): self._request['SourceBundle.S3Bucket'] = misc.to_unicode(bucket) def set_s3key(self, key): self._request['SourceBundle.S3Key'] = misc.to_unicode(key) def set_auto_create_app(self, switch): switch = self._check_boolean(switch) self._request['AutoCreateApplication'] = misc.to_unicode(switch) def set_env_name(self, name): self._request['EnvironmentName'] = misc.to_unicode(name) def set_env_id(self, env_id): self._request['EnvironmentId'] = misc.to_unicode(env_id) def set_env_names(self, name_set): name_list = self._set_to_list(name_set) for i in range(len(name_list)): self._request['EnvironmentNames.member.' + misc.to_unicode(i + 1)] \ = misc.to_unicode(name_list[i]) def set_env_ids(self, id_set): id_list = self._set_to_list(id_set) for i in range(len(id_list)): self._request['EnvironmentIds.member.' + misc.to_unicode(i + 1)] \ = misc.to_unicode(id_list[i]) def set_cname(self, name): self._request['CNAMEPrefix'] = misc.to_unicode(name) def set_source_configuration(self, name): self._request['SourceConfiguration'] = misc.to_unicode(name) def set_template(self, name): self._request['TemplateName'] = misc.to_unicode(name) def set_solution_stack(self, name): self._request['SolutionStackName'] = misc.to_unicode(name) def set_options(self, options_to_describe): index = 1 for namespace, options in options_to_describe.items(): for option_name in options: self._request['Options.member.' + misc.to_unicode(index) + '.Namespace'] \ = misc.to_unicode(namespace) self._request['Options.member.' 
+ misc.to_unicode(index) + '.OptionName'] \ = misc.to_unicode(option_name) index = index + 1 def set_option_settings(self, option_settings): index = 1 for namespace, options in option_settings.items(): for option_name, value in options.items(): self._request['OptionSettings.member.' + misc.to_unicode(index) + '.Namespace'] \ = misc.to_unicode(namespace) self._request['OptionSettings.member.' + misc.to_unicode(index) + '.OptionName'] \ = misc.to_unicode(option_name) self._request['OptionSettings.member.' + misc.to_unicode(index) + '.Value'] \ = misc.to_unicode(value) index = index + 1 def set_options_to_remove(self, options_to_remove): index = 1 for namespace, options in options_to_remove.items(): for option_name in options: self._request['OptionsToRemove.member.' + misc.to_unicode(index) + '.Namespace'] \ = misc.to_unicode(namespace) self._request['OptionsToRemove.member.' + misc.to_unicode(index) + '.OptionName'] \ = misc.to_unicode(option_name) index = index + 1 def set_include_deleted(self, switch): switch = self._check_boolean(switch) self._request['IncludeDeleted'] = misc.to_unicode(switch) def set_included_deleted_backto(self, datetime): self._request['IncludedDeletedBackTo'] = misc.to_unicode(datetime) def set_start_time(self, datetime): self._request['StartTime'] = misc.to_unicode(datetime) def set_end_time(self, datetime): self._request['EndTime'] = misc.to_unicode(datetime) def set_max_records(self, num): self._request['MaxRecords'] = misc.to_unicode(num) def set_next_token(self, token): self._request['NextToken'] = misc.to_unicode(token) def set_requst_id(self, request_id): self._request['RequestId'] = misc.to_unicode(request_id) def set_severity(self, severity): self._request['Severity'] = misc.to_unicode(severity) def set_terminate_env(self, switch): self._request['TerminateEnvByForce'] = misc.to_unicode(switch) def set_delete_source_bundle(self, switch): self._request['DeleteSourceBundle'] = misc.to_unicode(switch) def set_terminate_resources(self, 
switch): self._request['TerminateResources'] = misc.to_unicode(switch) def set_template_specification(self, template_spec): #TemplateSource if template_spec.template_source is not None: ts = template_spec.template_source if ts.solution_stack_name is not None: self._request['TemplateSpecification.TemplateSource.SolutionStackName'] \ = misc.to_unicode(ts.solution_stack_name) #Snippets if template_spec.template_snippets is not None: for i, snippet in enumerate(template_spec.template_snippets): if snippet.snippet_name is not None: self._request['TemplateSpecification.TemplateSnippets.member.' \ + misc.to_unicode(i + 1)+'.SnippetName'] \ = misc.to_unicode(snippet.snippet_name) if snippet.source_url is not None: self._request['TemplateSpecification.TemplateSnippets.member.' \ + misc.to_unicode(i + 1)+'.SourceUrl'] \ = misc.to_unicode(snippet.source_url) if snippet.order is not None: self._request['TemplateSpecification.TemplateSnippets.member.' \ + misc.to_unicode(i + 1)+'.Order'] \ = misc.to_unicode(snippet.order) def set_tier(self, environment_tier): self._request['Tier.Name'] = misc.to_unicode(environment_tier.name) self._request['Tier.Type'] = misc.to_unicode(environment_tier.type) self._request['Tier.Version'] = misc.to_unicode(environment_tier.version) def set_info_type(self, info_type): self._request['InfoType'] = misc.to_unicode(info_type) class Response(object): def __init__(self, request_id, result = None, next_token = None): self._request_id = request_id self._result = result self._next_token = next_token def __repr__(self): return 'API Response.\n Request ID: {0}\n Results: {1}'.\ format(self.request_id, misc.collection_to_string(self._result)) @property def request_id(self): return self._request_id @property def result(self): return self._result @property def next_token(self): return self._next_token
mit
seladb/PcapPlusPlus
Tests/ExamplesTest/tests/test_pcapsplitter.py
1
9849
"""End-to-end tests for the PcapSplitter example application.

Each test runs the example (via ``ExampleTest.run_example``) on the
``many-protocols.pcap`` capture with one split method, then re-reads the
output files with scapy and checks they were split correctly.
"""
import pytest
import os
import ipaddress
from scapy.all import rdpcap, IP, IPv6, TCP, UDP
from .test_utils import ExampleTest


class TestPcapSplitter(ExampleTest):
    """Drives the PcapSplitter example and validates its output pcap files."""

    # Marks used by the test runner to select these tests; 'no_network'
    # presumably indicates they run fully offline — TODO confirm runner config.
    pytestmark = [pytest.mark.pcapsplitter, pytest.mark.no_network]

    def test_split_by_file_size(self, tmpdir):
        """Split by ~100 KB per file; all but the last file hit the size target."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'file-size',
            '-p': '100000'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 30
        for filename in os.listdir(tmpdir):
            # The last file (suffix '29') holds the remainder and may be smaller.
            if not os.path.splitext(filename)[0].endswith('29'):
                # Allow slack around 100000: the splitter cuts on whole packets.
                assert 98500 <= os.path.getsize(os.path.join(tmpdir, filename)) <= 101500

    def test_split_by_packet_count(self, tmpdir):
        """Split by 300 packets per file; all but the last file hold exactly 300."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'packet-count',
            '-p': '300'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 16
        for filename in os.listdir(tmpdir):
            # The last file (suffix '15') holds the remainder.
            if not os.path.splitext(filename)[0].endswith('15'):
                packets = rdpcap(os.path.join(tmpdir, filename))
                assert len(packets) == 300

    def test_split_by_client_ip(self, tmpdir):
        """Split by client IP: every packet in a file involves that client address."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'client-ip'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 5
        for filename in os.listdir(tmpdir):
            packets = rdpcap(os.path.join(tmpdir, filename))
            if os.path.splitext(filename)[0].endswith('miscellaneous'):
                # The 'miscellaneous' file collects non-TCP/UDP traffic.
                for packet in packets:
                    assert not packet.haslayer(TCP) and not packet.haslayer(UDP)
            else:
                # The IP is embedded in the filename after a fixed-length prefix
                # (offset 25), with '-' in place of '.'/':' — assumed naming
                # scheme of the example; TODO confirm against PcapSplitter output.
                ip_addr = os.path.splitext(filename)[0][25:]
                try:
                    ip_addr = ipaddress.ip_address(ip_addr.replace('-', '.'))
                except ValueError:
                    # Not a valid IPv4 string — treat the dashes as IPv6 colons.
                    ip_addr = ipaddress.ip_address(ip_addr.replace('-', ':'))
                for packet in packets:
                    assert packet.haslayer(TCP) or packet.haslayer(UDP)
                    if isinstance(ip_addr, ipaddress.IPv4Address):
                        assert packet.haslayer(IP)
                        # The client address must appear as either src or dst.
                        assert ipaddress.ip_address(packet[IP].src) == ip_addr or ipaddress.ip_address(packet[IP].dst) == ip_addr
                    else:
                        assert packet.haslayer(IPv6)
                        assert ipaddress.ip_address(packet[IPv6].src) == ip_addr or ipaddress.ip_address(packet[IPv6].dst) == ip_addr

    def test_split_by_server_ip(self, tmpdir):
        """Split by server IP: every packet in a file involves that server address."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'server-ip'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 60
        for filename in os.listdir(tmpdir):
            packets = rdpcap(os.path.join(tmpdir, filename))
            if os.path.splitext(filename)[0].endswith('miscellaneous'):
                # Non-TCP/UDP traffic lands in the 'miscellaneous' file.
                for packet in packets:
                    assert not packet.haslayer(TCP) and not packet.haslayer(UDP)
            else:
                # Same filename-encoding convention as the client-ip test above.
                ip_addr = os.path.splitext(filename)[0][25:]
                try:
                    ip_addr = ipaddress.ip_address(ip_addr.replace('-', '.'))
                except ValueError:
                    ip_addr = ipaddress.ip_address(ip_addr.replace('-', ':'))
                for packet in packets:
                    assert packet.haslayer(TCP) or packet.haslayer(UDP)
                    if isinstance(ip_addr, ipaddress.IPv4Address):
                        assert packet.haslayer(IP)
                        assert ipaddress.ip_address(packet[IP].src) == ip_addr or ipaddress.ip_address(packet[IP].dst) == ip_addr
                    else:
                        assert packet.haslayer(IPv6)
                        assert ipaddress.ip_address(packet[IPv6].src) == ip_addr or ipaddress.ip_address(packet[IPv6].dst) == ip_addr

    def test_split_by_server_port(self, tmpdir):
        """Split by server port: the port from the filename appears in each packet."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'server-port'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 7
        for filename in os.listdir(tmpdir):
            packets = rdpcap(os.path.join(tmpdir, filename))
            if os.path.splitext(filename)[0].endswith('miscellaneous'):
                for packet in packets:
                    assert not packet.haslayer(TCP) and not packet.haslayer(UDP)
            else:
                # Port number encoded in the filename after a fixed prefix
                # (offset 27) — assumed naming scheme; TODO confirm.
                server_port = int(os.path.splitext(filename)[0][27:])
                for packet in packets:
                    # The port must be either source or destination, TCP or UDP.
                    assert (packet.haslayer(TCP) and (packet[TCP].sport == server_port or packet[TCP].dport == server_port)) or \
                        (packet.haslayer(UDP) and (packet[UDP].sport == server_port or packet[UDP].dport == server_port))

    def test_split_by_client_port(self, tmpdir):
        """Split by client port: the port from the filename appears in each packet."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'client-port'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 254
        for filename in os.listdir(tmpdir):
            packets = rdpcap(os.path.join(tmpdir, filename))
            if os.path.splitext(filename)[0].endswith('miscellaneous'):
                for packet in packets:
                    assert not packet.haslayer(TCP) and not packet.haslayer(UDP)
            else:
                # Same fixed-prefix filename encoding as the server-port test.
                client_port = int(os.path.splitext(filename)[0][27:])
                for packet in packets:
                    assert (packet.haslayer(TCP) and (packet[TCP].sport == client_port or packet[TCP].dport == client_port)) or \
                        (packet.haslayer(UDP) and (packet[UDP].sport == client_port or packet[UDP].dport == client_port))

    def test_split_by_ip_src_dst(self, tmpdir):
        """Split by unordered {src, dst} IP pair: one unique pair per output file."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'ip-src-dst'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 65
        # Maps each frozenset({src, dst}) to True; used only for uniqueness checks.
        ip_src_dst_map = {}
        for filename in os.listdir(tmpdir):
            packets = rdpcap(os.path.join(tmpdir, filename))
            if packets[0].haslayer(IP):
                ip_type = IP
                # frozenset makes the pair direction-insensitive (src<->dst).
                ip_src_dst = frozenset([packets[0][IP].src, packets[0][IP].dst])
            elif packets[0].haslayer(IPv6):
                ip_type = IPv6
                ip_src_dst = frozenset([packets[0][IPv6].src, packets[0][IPv6].dst])
            else:
                # Non-IP traffic: at most one such output file is expected.
                non_ip = frozenset([])
                assert non_ip not in ip_src_dst_map
                ip_src_dst_map[non_ip] = True
                continue
            # Each {src, dst} pair must map to exactly one output file.
            assert ip_src_dst not in ip_src_dst_map
            ip_src_dst_map[ip_src_dst] = True
            for packet in packets:
                # Every packet in the file shares the first packet's IP pair.
                assert packet.haslayer(ip_type)
                assert ip_src_dst == frozenset([packet[ip_type].src, packet[ip_type].dst])

    def test_split_by_connection(self, tmpdir):
        """Split by 5-tuple connection: one unique connection per output file."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'connection'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 254
        # Maps each connection key to True; used only for uniqueness checks.
        connection_map = {}
        for filename in os.listdir(tmpdir):
            packets = rdpcap(os.path.join(tmpdir, filename))
            if packets[0].haslayer(TCP):
                trans_layer = TCP
            elif packets[0].haslayer(UDP):
                trans_layer = UDP
            else:
                trans_layer = None
            if trans_layer is not None:
                net_layer = IP if packets[0].haslayer(IP) else IPv6
            else:
                net_layer = None
            if net_layer is not None and trans_layer is not None:
                # Connection key: transport class + both IPs + both ports.
                # frozenset makes it direction-insensitive; NOTE(review): it
                # also collapses duplicates (e.g. sport == dport), which could
                # in theory let two distinct connections share a key.
                conn = frozenset([trans_layer, \
                    packets[0][net_layer].src, packets[0][net_layer].dst, \
                    packets[0][trans_layer].sport, packets[0][trans_layer].dport])
            else:
                # Non-TCP/UDP traffic: single catch-all key.
                conn = frozenset([])
            assert not conn in connection_map
            connection_map[conn] = True
            if len(conn) == 0:
                # Catch-all file: no per-packet connection check possible.
                continue
            for packet in packets:
                # Every packet in the file belongs to the same connection.
                assert packet.haslayer(net_layer) and packet.haslayer(trans_layer)
                packet_conn = frozenset([trans_layer, \
                    packet[net_layer].src, packet[net_layer].dst, \
                    packet[trans_layer].sport, packet[trans_layer].dport])
                assert packet_conn == conn

    def test_split_by_bpf_filter(self, tmpdir):
        """Split by BPF filter 'udp': one file matches, one file doesn't."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'bpf-filter',
            '-p': 'udp'
        }
        self.run_example(args=args)
        assert len(os.listdir(tmpdir)) == 2
        for filename in os.listdir(tmpdir):
            packets = rdpcap(os.path.join(tmpdir, filename))
            # Filename suffix tells us which of the two files this is.
            match_bpf = not os.path.splitext(filename)[0].endswith('not-match-bpf')
            for packet in packets:
                # Matching file contains only UDP; the other contains none.
                assert packet.haslayer(UDP) == match_bpf

    def test_split_by_round_robin(self, tmpdir):
        """Round-robin into 10 files: packet counts differ by at most one."""
        divide_by = 10
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir,
            '-m': 'round-robin',
            '-p': str(divide_by)
        }
        self.run_example(args=args)
        # Expected floor of packets per file; some files get one extra packet
        # when the total doesn't divide evenly.
        num_of_packets_per_file = int(len(rdpcap(os.path.join('pcap_examples', 'many-protocols.pcap'))) / divide_by)
        assert len(os.listdir(tmpdir)) == divide_by
        for filename in os.listdir(tmpdir):
            assert num_of_packets_per_file <= len(rdpcap(os.path.join(tmpdir, filename))) <= num_of_packets_per_file + 1

    def test_input_file_not_given(self):
        """Missing -f: the example must exit with code 1 and a clear error."""
        args = {}
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert 'Error: Input file name was not given' in completed_process.stdout

    def test_output_dir_not_given(self):
        """Missing -o: the example must exit with code 1 and a clear error."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap')
        }
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert 'Error: Output directory name was not given' in completed_process.stdout

    def test_split_method_not_given(self, tmpdir):
        """Missing -m: the example must exit with code 1 and a clear error."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': tmpdir
        }
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert 'Error: Split method was not given' in completed_process.stdout

    def test_output_dir_not_exist(self):
        """Nonexistent output directory: exit code 1 and a clear error."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols.pcap'),
            '-o': 'blablablalba12345'
        }
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert 'Error: Output directory doesn\'t exist' in completed_process.stdout

    def test_input_file_not_exist(self, tmpdir):
        """Nonexistent input pcap: exit code 1 and a clear error."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols123.pcap'),
            '-o': tmpdir,
            '-m': 'ip-src-dst'
        }
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert 'Error: Error opening input pcap file' in completed_process.stdout

    def test_split_method_not_exist(self, tmpdir):
        """Unknown -m value: exit code 1 and a clear error naming the method."""
        args = {
            '-f': os.path.join('pcap_examples', 'many-protocols123.pcap'),
            '-o': tmpdir,
            '-m': 'blabla'
        }
        completed_process = self.run_example(args=args, expected_return_code=1)
        assert 'Error: Unknown method \'blabla\'' in completed_process.stdout
unlicense
yongtang/tensorflow
tensorflow/python/saved_model/simple_save.py
25
4169
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel simple save functionality."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=['saved_model.simple_save'])
@deprecation.deprecated(
    None,
    'This function will only be available through the v1 compatibility '
    'library as tf.compat.v1.saved_model.simple_save.')
def simple_save(session, export_dir, inputs, outputs, legacy_init_op=None):
  """Convenience function to build a SavedModel suitable for serving.

  In many common cases, saving models for serving will be as simple as:

      simple_save(session,
                  export_dir,
                  inputs={"x": x, "y": y},
                  outputs={"z": z})

  A few practical implications of using this helper:

  - The exported graph is tagged for inference / serving (i.e. it uses the
    tag `saved_model.SERVING`).
  - The resulting SavedModel loads in TensorFlow Serving and supports the
    [Predict
    API](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/predict.proto).
    For the Classify, Regress, or MultiInference APIs, use either
    [tf.Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
    or the lower level
    [SavedModel
    APIs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
  - Ops that depend on on-disk "assets" are handled automatically as long as
    the assets are registered in the `GraphKeys.ASSET_FILEPATHS` collection;
    only assets in that collection are exported. For anything more custom,
    use the
    [SavedModelBuilder](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/builder.py)
    directly.

  More information about SavedModel and signatures can be found here:
  https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md.

  Args:
    session: The TensorFlow session from which to save the meta graph and
        variables.
    export_dir: The path to which the SavedModel will be stored.
    inputs: dict mapping string input names to tensors. These are added to
        the SignatureDef as the inputs.
    outputs: dict mapping string output names to tensors. These are added to
        the SignatureDef as the outputs.
    legacy_init_op: Legacy support for op or group of ops to execute after the
        restore op upon a load.
  """
  # Wrap the provided tensors in a single Predict-style SignatureDef,
  # registered under the default serving signature key.
  predict_signature = signature_def_utils.predict_signature_def(
      inputs, outputs)
  default_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY

  saved_model_builder = builder.SavedModelBuilder(export_dir)
  saved_model_builder.add_meta_graph_and_variables(
      session,
      tags=[tag_constants.SERVING],
      signature_def_map={default_key: predict_signature},
      # Only assets registered in this collection are exported.
      assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
      main_op=legacy_init_op,
      clear_devices=True)
  saved_model_builder.save()
apache-2.0
ktan2020/legacy-automation
win/Lib/test/test_descr.py
7
162732
import __builtin__ import sys import types import unittest from copy import deepcopy from test import test_support class OperatorsTest(unittest.TestCase): def __init__(self, *args, **kwargs): unittest.TestCase.__init__(self, *args, **kwargs) self.binops = { 'add': '+', 'sub': '-', 'mul': '*', 'div': '/', 'divmod': 'divmod', 'pow': '**', 'lshift': '<<', 'rshift': '>>', 'and': '&', 'xor': '^', 'or': '|', 'cmp': 'cmp', 'lt': '<', 'le': '<=', 'eq': '==', 'ne': '!=', 'gt': '>', 'ge': '>=', } for name, expr in self.binops.items(): if expr.islower(): expr = expr + "(a, b)" else: expr = 'a %s b' % expr self.binops[name] = expr self.unops = { 'pos': '+', 'neg': '-', 'abs': 'abs', 'invert': '~', 'int': 'int', 'long': 'long', 'float': 'float', 'oct': 'oct', 'hex': 'hex', } for name, expr in self.unops.items(): if expr.islower(): expr = expr + "(a)" else: expr = '%s a' % expr self.unops[name] = expr def unop_test(self, a, res, expr="len(a)", meth="__len__"): d = {'a': a} self.assertEqual(eval(expr, d), res) t = type(a) m = getattr(t, meth) # Find method in parent class while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) self.assertEqual(m(a), res) bm = getattr(a, meth) self.assertEqual(bm(), res) def binop_test(self, a, b, res, expr="a+b", meth="__add__"): d = {'a': a, 'b': b} # XXX Hack so this passes before 2.3 when -Qnew is specified. if meth == "__div__" and 1/2 == 0.5: meth = "__truediv__" if meth == '__divmod__': pass self.assertEqual(eval(expr, d), res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. 
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) self.assertEqual(m(a, b), res) bm = getattr(a, meth) self.assertEqual(bm(b), res) def ternop_test(self, a, b, c, res, expr="a[b:c]", meth="__getslice__"): d = {'a': a, 'b': b, 'c': c} self.assertEqual(eval(expr, d), res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) self.assertEqual(m(a, b, c), res) bm = getattr(a, meth) self.assertEqual(bm(b, c), res) def setop_test(self, a, b, res, stmt="a+=b", meth="__iadd__"): d = {'a': deepcopy(a), 'b': b} exec stmt in d self.assertEqual(d['a'], res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) d['a'] = deepcopy(a) m(d['a'], b) self.assertEqual(d['a'], res) d['a'] = deepcopy(a) bm = getattr(d['a'], meth) bm(b) self.assertEqual(d['a'], res) def set2op_test(self, a, b, c, res, stmt="a[b]=c", meth="__setitem__"): d = {'a': deepcopy(a), 'b': b, 'c': c} exec stmt in d self.assertEqual(d['a'], res) t = type(a) m = getattr(t, meth) while meth not in t.__dict__: t = t.__bases__[0] # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. 
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) d['a'] = deepcopy(a) m(d['a'], b, c) self.assertEqual(d['a'], res) d['a'] = deepcopy(a) bm = getattr(d['a'], meth) bm(b, c) self.assertEqual(d['a'], res) def set3op_test(self, a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"): dictionary = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d} exec stmt in dictionary self.assertEqual(dictionary['a'], res) t = type(a) while meth not in t.__dict__: t = t.__bases__[0] m = getattr(t, meth) # in some implementations (e.g. PyPy), 'm' can be a regular unbound # method object; the getattr() below obtains its underlying function. self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth]) dictionary['a'] = deepcopy(a) m(dictionary['a'], b, c, d) self.assertEqual(dictionary['a'], res) dictionary['a'] = deepcopy(a) bm = getattr(dictionary['a'], meth) bm(b, c, d) self.assertEqual(dictionary['a'], res) def test_lists(self): # Testing list operations... # Asserts are within individual test methods self.binop_test([1], [2], [1,2], "a+b", "__add__") self.binop_test([1,2,3], 2, 1, "b in a", "__contains__") self.binop_test([1,2,3], 4, 0, "b in a", "__contains__") self.binop_test([1,2,3], 1, 2, "a[b]", "__getitem__") self.ternop_test([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__") self.setop_test([1], [2], [1,2], "a+=b", "__iadd__") self.setop_test([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__") self.unop_test([1,2,3], 3, "len(a)", "__len__") self.binop_test([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__") self.binop_test([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__") self.set2op_test([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__") self.set3op_test([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d", "__setslice__") def test_dicts(self): # Testing dict operations... 
if hasattr(dict, '__cmp__'): # PyPy has only rich comparison on dicts self.binop_test({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__") else: self.binop_test({1:2}, {2:1}, True, "a < b", "__lt__") self.binop_test({1:2,3:4}, 1, 1, "b in a", "__contains__") self.binop_test({1:2,3:4}, 2, 0, "b in a", "__contains__") self.binop_test({1:2,3:4}, 1, 2, "a[b]", "__getitem__") d = {1:2, 3:4} l1 = [] for i in d.keys(): l1.append(i) l = [] for i in iter(d): l.append(i) self.assertEqual(l, l1) l = [] for i in d.__iter__(): l.append(i) self.assertEqual(l, l1) l = [] for i in dict.__iter__(d): l.append(i) self.assertEqual(l, l1) d = {1:2, 3:4} self.unop_test(d, 2, "len(a)", "__len__") self.assertEqual(eval(repr(d), {}), d) self.assertEqual(eval(d.__repr__(), {}), d) self.set2op_test({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c", "__setitem__") # Tests for unary and binary operators def number_operators(self, a, b, skip=[]): dict = {'a': a, 'b': b} for name, expr in self.binops.items(): if name not in skip: name = "__%s__" % name if hasattr(a, name): res = eval(expr, dict) self.binop_test(a, b, res, expr, name) for name, expr in self.unops.items(): if name not in skip: name = "__%s__" % name if hasattr(a, name): res = eval(expr, dict) self.unop_test(a, res, expr, name) def test_ints(self): # Testing int operations... self.number_operators(100, 3) # The following crashes in Python 2.2 self.assertEqual((1).__nonzero__(), 1) self.assertEqual((0).__nonzero__(), 0) # This returns 'NotImplemented' in Python 2.2 class C(int): def __add__(self, other): return NotImplemented self.assertEqual(C(5L), 5) try: C() + "" except TypeError: pass else: self.fail("NotImplemented should have caused TypeError") try: C(sys.maxint+1) except OverflowError: pass else: self.fail("should have raised OverflowError") def test_longs(self): # Testing long operations... self.number_operators(100L, 3L) def test_floats(self): # Testing float operations... 
self.number_operators(100.0, 3.0) def test_complexes(self): # Testing complex operations... self.number_operators(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge', 'int', 'long', 'float']) class Number(complex): __slots__ = ['prec'] def __new__(cls, *args, **kwds): result = complex.__new__(cls, *args) result.prec = kwds.get('prec', 12) return result def __repr__(self): prec = self.prec if self.imag == 0.0: return "%.*g" % (prec, self.real) if self.real == 0.0: return "%.*gj" % (prec, self.imag) return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag) __str__ = __repr__ a = Number(3.14, prec=6) self.assertEqual(repr(a), "3.14") self.assertEqual(a.prec, 6) a = Number(a, prec=2) self.assertEqual(repr(a), "3.1") self.assertEqual(a.prec, 2) a = Number(234.5) self.assertEqual(repr(a), "234.5") self.assertEqual(a.prec, 12) @test_support.impl_detail("the module 'xxsubtype' is internal") def test_spam_lists(self): # Testing spamlist operations... import copy, xxsubtype as spam def spamlist(l, memo=None): import xxsubtype as spam return spam.spamlist(l) # This is an ugly hack: copy._deepcopy_dispatch[spam.spamlist] = spamlist self.binop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b", "__add__") self.binop_test(spamlist([1,2,3]), 2, 1, "b in a", "__contains__") self.binop_test(spamlist([1,2,3]), 4, 0, "b in a", "__contains__") self.binop_test(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__") self.ternop_test(spamlist([1,2,3]), 0, 2, spamlist([1,2]), "a[b:c]", "__getslice__") self.setop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+=b", "__iadd__") self.setop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b", "__imul__") self.unop_test(spamlist([1,2,3]), 3, "len(a)", "__len__") self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b", "__mul__") self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a", "__rmul__") self.set2op_test(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c", "__setitem__") self.set3op_test(spamlist([1,2,3,4]), 1, 
3, spamlist([5,6]), spamlist([1,5,6,4]), "a[b:c]=d", "__setslice__") # Test subclassing class C(spam.spamlist): def foo(self): return 1 a = C() self.assertEqual(a, []) self.assertEqual(a.foo(), 1) a.append(100) self.assertEqual(a, [100]) self.assertEqual(a.getstate(), 0) a.setstate(42) self.assertEqual(a.getstate(), 42) @test_support.impl_detail("the module 'xxsubtype' is internal") def test_spam_dicts(self): # Testing spamdict operations... import copy, xxsubtype as spam def spamdict(d, memo=None): import xxsubtype as spam sd = spam.spamdict() for k, v in d.items(): sd[k] = v return sd # This is an ugly hack: copy._deepcopy_dispatch[spam.spamdict] = spamdict self.binop_test(spamdict({1:2}), spamdict({2:1}), -1, "cmp(a,b)", "__cmp__") self.binop_test(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__") self.binop_test(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__") self.binop_test(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__") d = spamdict({1:2,3:4}) l1 = [] for i in d.keys(): l1.append(i) l = [] for i in iter(d): l.append(i) self.assertEqual(l, l1) l = [] for i in d.__iter__(): l.append(i) self.assertEqual(l, l1) l = [] for i in type(spamdict({})).__iter__(d): l.append(i) self.assertEqual(l, l1) straightd = {1:2, 3:4} spamd = spamdict(straightd) self.unop_test(spamd, 2, "len(a)", "__len__") self.unop_test(spamd, repr(straightd), "repr(a)", "__repr__") self.set2op_test(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}), "a[b]=c", "__setitem__") # Test subclassing class C(spam.spamdict): def foo(self): return 1 a = C() self.assertEqual(a.items(), []) self.assertEqual(a.foo(), 1) a['foo'] = 'bar' self.assertEqual(a.items(), [('foo', 'bar')]) self.assertEqual(a.getstate(), 0) a.setstate(100) self.assertEqual(a.getstate(), 100) class ClassPropertiesAndMethods(unittest.TestCase): def test_python_dicts(self): # Testing Python subclass of dict... 
self.assertTrue(issubclass(dict, dict)) self.assertIsInstance({}, dict) d = dict() self.assertEqual(d, {}) self.assertTrue(d.__class__ is dict) self.assertIsInstance(d, dict) class C(dict): state = -1 def __init__(self_local, *a, **kw): if a: self.assertEqual(len(a), 1) self_local.state = a[0] if kw: for k, v in kw.items(): self_local[v] = k def __getitem__(self, key): return self.get(key, 0) def __setitem__(self_local, key, value): self.assertIsInstance(key, type(0)) dict.__setitem__(self_local, key, value) def setstate(self, state): self.state = state def getstate(self): return self.state self.assertTrue(issubclass(C, dict)) a1 = C(12) self.assertEqual(a1.state, 12) a2 = C(foo=1, bar=2) self.assertEqual(a2[1] == 'foo' and a2[2], 'bar') a = C() self.assertEqual(a.state, -1) self.assertEqual(a.getstate(), -1) a.setstate(0) self.assertEqual(a.state, 0) self.assertEqual(a.getstate(), 0) a.setstate(10) self.assertEqual(a.state, 10) self.assertEqual(a.getstate(), 10) self.assertEqual(a[42], 0) a[42] = 24 self.assertEqual(a[42], 24) N = 50 for i in range(N): a[i] = C() for j in range(N): a[i][j] = i*j for i in range(N): for j in range(N): self.assertEqual(a[i][j], i*j) def test_python_lists(self): # Testing Python subclass of list... class C(list): def __getitem__(self, i): return list.__getitem__(self, i) + 100 def __getslice__(self, i, j): return (i, j) a = C() a.extend([0,1,2]) self.assertEqual(a[0], 100) self.assertEqual(a[1], 101) self.assertEqual(a[2], 102) self.assertEqual(a[100:200], (100,200)) def test_metaclass(self): # Testing __metaclass__... 
class C: __metaclass__ = type def __init__(self): self.__state = 0 def getstate(self): return self.__state def setstate(self, state): self.__state = state a = C() self.assertEqual(a.getstate(), 0) a.setstate(10) self.assertEqual(a.getstate(), 10) class D: class __metaclass__(type): def myself(cls): return cls self.assertEqual(D.myself(), D) d = D() self.assertEqual(d.__class__, D) class M1(type): def __new__(cls, name, bases, dict): dict['__spam__'] = 1 return type.__new__(cls, name, bases, dict) class C: __metaclass__ = M1 self.assertEqual(C.__spam__, 1) c = C() self.assertEqual(c.__spam__, 1) class _instance(object): pass class M2(object): @staticmethod def __new__(cls, name, bases, dict): self = object.__new__(cls) self.name = name self.bases = bases self.dict = dict return self def __call__(self): it = _instance() # Early binding of methods for key in self.dict: if key.startswith("__"): continue setattr(it, key, self.dict[key].__get__(it, self)) return it class C: __metaclass__ = M2 def spam(self): return 42 self.assertEqual(C.name, 'C') self.assertEqual(C.bases, ()) self.assertIn('spam', C.dict) c = C() self.assertEqual(c.spam(), 42) # More metaclass examples class autosuper(type): # Automatically add __super to the class # This trick only works for dynamic classes def __new__(metaclass, name, bases, dict): cls = super(autosuper, metaclass).__new__(metaclass, name, bases, dict) # Name mangling for __super removes leading underscores while name[:1] == "_": name = name[1:] if name: name = "_%s__super" % name else: name = "__super" setattr(cls, name, super(cls)) return cls class A: __metaclass__ = autosuper def meth(self): return "A" class B(A): def meth(self): return "B" + self.__super.meth() class C(A): def meth(self): return "C" + self.__super.meth() class D(C, B): def meth(self): return "D" + self.__super.meth() self.assertEqual(D().meth(), "DCBA") class E(B, C): def meth(self): return "E" + self.__super.meth() self.assertEqual(E().meth(), "EBCA") class 
autoproperty(type): # Automatically create property attributes when methods # named _get_x and/or _set_x are found def __new__(metaclass, name, bases, dict): hits = {} for key, val in dict.iteritems(): if key.startswith("_get_"): key = key[5:] get, set = hits.get(key, (None, None)) get = val hits[key] = get, set elif key.startswith("_set_"): key = key[5:] get, set = hits.get(key, (None, None)) set = val hits[key] = get, set for key, (get, set) in hits.iteritems(): dict[key] = property(get, set) return super(autoproperty, metaclass).__new__(metaclass, name, bases, dict) class A: __metaclass__ = autoproperty def _get_x(self): return -self.__x def _set_x(self, x): self.__x = -x a = A() self.assertTrue(not hasattr(a, "x")) a.x = 12 self.assertEqual(a.x, 12) self.assertEqual(a._A__x, -12) class multimetaclass(autoproperty, autosuper): # Merge of multiple cooperating metaclasses pass class A: __metaclass__ = multimetaclass def _get_x(self): return "A" class B(A): def _get_x(self): return "B" + self.__super._get_x() class C(A): def _get_x(self): return "C" + self.__super._get_x() class D(C, B): def _get_x(self): return "D" + self.__super._get_x() self.assertEqual(D().x, "DCBA") # Make sure type(x) doesn't call x.__class__.__init__ class T(type): counter = 0 def __init__(self, *args): T.counter += 1 class C: __metaclass__ = T self.assertEqual(T.counter, 1) a = C() self.assertEqual(type(a), C) self.assertEqual(T.counter, 1) class C(object): pass c = C() try: c() except TypeError: pass else: self.fail("calling object w/o call method should raise " "TypeError") # Testing code to find most derived baseclass class A(type): def __new__(*args, **kwargs): return type.__new__(*args, **kwargs) class B(object): pass class C(object): __metaclass__ = A # The most derived metaclass of D is A rather than type. class D(B, C): pass def test_module_subclasses(self): # Testing Python subclass of module... 
log = [] MT = type(sys) class MM(MT): def __init__(self, name): MT.__init__(self, name) def __getattribute__(self, name): log.append(("getattr", name)) return MT.__getattribute__(self, name) def __setattr__(self, name, value): log.append(("setattr", name, value)) MT.__setattr__(self, name, value) def __delattr__(self, name): log.append(("delattr", name)) MT.__delattr__(self, name) a = MM("a") a.foo = 12 x = a.foo del a.foo self.assertEqual(log, [("setattr", "foo", 12), ("getattr", "foo"), ("delattr", "foo")]) # http://python.org/sf/1174712 try: class Module(types.ModuleType, str): pass except TypeError: pass else: self.fail("inheriting from ModuleType and str at the same time " "should fail") def test_multiple_inheritence(self): # Testing multiple inheritance... class C(object): def __init__(self): self.__state = 0 def getstate(self): return self.__state def setstate(self, state): self.__state = state a = C() self.assertEqual(a.getstate(), 0) a.setstate(10) self.assertEqual(a.getstate(), 10) class D(dict, C): def __init__(self): type({}).__init__(self) C.__init__(self) d = D() self.assertEqual(d.keys(), []) d["hello"] = "world" self.assertEqual(d.items(), [("hello", "world")]) self.assertEqual(d["hello"], "world") self.assertEqual(d.getstate(), 0) d.setstate(10) self.assertEqual(d.getstate(), 10) self.assertEqual(D.__mro__, (D, dict, C, object)) # SF bug #442833 class Node(object): def __int__(self): return int(self.foo()) def foo(self): return "23" class Frag(Node, list): def foo(self): return "42" self.assertEqual(Node().__int__(), 23) self.assertEqual(int(Node()), 23) self.assertEqual(Frag().__int__(), 42) self.assertEqual(int(Frag()), 42) # MI mixing classic and new-style classes. class A: x = 1 class B(A): pass class C(A): x = 2 class D(B, C): pass self.assertEqual(D.x, 1) # Classic MRO is preserved for a classic base class. 
class E(D, object): pass self.assertEqual(E.__mro__, (E, D, B, A, C, object)) self.assertEqual(E.x, 1) # But with a mix of classic bases, their MROs are combined using # new-style MRO. class F(B, C, object): pass self.assertEqual(F.__mro__, (F, B, C, A, object)) self.assertEqual(F.x, 2) # Try something else. class C: def cmethod(self): return "C a" def all_method(self): return "C b" class M1(C, object): def m1method(self): return "M1 a" def all_method(self): return "M1 b" self.assertEqual(M1.__mro__, (M1, C, object)) m = M1() self.assertEqual(m.cmethod(), "C a") self.assertEqual(m.m1method(), "M1 a") self.assertEqual(m.all_method(), "M1 b") class D(C): def dmethod(self): return "D a" def all_method(self): return "D b" class M2(D, object): def m2method(self): return "M2 a" def all_method(self): return "M2 b" self.assertEqual(M2.__mro__, (M2, D, C, object)) m = M2() self.assertEqual(m.cmethod(), "C a") self.assertEqual(m.dmethod(), "D a") self.assertEqual(m.m2method(), "M2 a") self.assertEqual(m.all_method(), "M2 b") class M3(M1, M2, object): def m3method(self): return "M3 a" def all_method(self): return "M3 b" self.assertEqual(M3.__mro__, (M3, M1, M2, D, C, object)) m = M3() self.assertEqual(m.cmethod(), "C a") self.assertEqual(m.dmethod(), "D a") self.assertEqual(m.m1method(), "M1 a") self.assertEqual(m.m2method(), "M2 a") self.assertEqual(m.m3method(), "M3 a") self.assertEqual(m.all_method(), "M3 b") class Classic: pass try: class New(Classic): __metaclass__ = type except TypeError: pass else: self.fail("new class with only classic bases - shouldn't be") def test_diamond_inheritence(self): # Testing multiple inheritance special cases... 
class A(object): def spam(self): return "A" self.assertEqual(A().spam(), "A") class B(A): def boo(self): return "B" def spam(self): return "B" self.assertEqual(B().spam(), "B") self.assertEqual(B().boo(), "B") class C(A): def boo(self): return "C" self.assertEqual(C().spam(), "A") self.assertEqual(C().boo(), "C") class D(B, C): pass self.assertEqual(D().spam(), "B") self.assertEqual(D().boo(), "B") self.assertEqual(D.__mro__, (D, B, C, A, object)) class E(C, B): pass self.assertEqual(E().spam(), "B") self.assertEqual(E().boo(), "C") self.assertEqual(E.__mro__, (E, C, B, A, object)) # MRO order disagreement try: class F(D, E): pass except TypeError: pass else: self.fail("expected MRO order disagreement (F)") try: class G(E, D): pass except TypeError: pass else: self.fail("expected MRO order disagreement (G)") # see thread python-dev/2002-October/029035.html def test_ex5_from_c3_switch(self): # Testing ex5 from C3 switch discussion... class A(object): pass class B(object): pass class C(object): pass class X(A): pass class Y(A): pass class Z(X,B,Y,C): pass self.assertEqual(Z.__mro__, (Z, X, B, Y, A, C, object)) # see "A Monotonic Superclass Linearization for Dylan", # by Kim Barrett et al. (OOPSLA 1996) def test_monotonicity(self): # Testing MRO monotonicity... class Boat(object): pass class DayBoat(Boat): pass class WheelBoat(Boat): pass class EngineLess(DayBoat): pass class SmallMultihull(DayBoat): pass class PedalWheelBoat(EngineLess,WheelBoat): pass class SmallCatamaran(SmallMultihull): pass class Pedalo(PedalWheelBoat,SmallCatamaran): pass self.assertEqual(PedalWheelBoat.__mro__, (PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat, object)) self.assertEqual(SmallCatamaran.__mro__, (SmallCatamaran, SmallMultihull, DayBoat, Boat, object)) self.assertEqual(Pedalo.__mro__, (Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran, SmallMultihull, DayBoat, WheelBoat, Boat, object)) # see "A Monotonic Superclass Linearization for Dylan", # by Kim Barrett et al. 
(OOPSLA 1996) def test_consistency_with_epg(self): # Testing consistency with EPG... class Pane(object): pass class ScrollingMixin(object): pass class EditingMixin(object): pass class ScrollablePane(Pane,ScrollingMixin): pass class EditablePane(Pane,EditingMixin): pass class EditableScrollablePane(ScrollablePane,EditablePane): pass self.assertEqual(EditableScrollablePane.__mro__, (EditableScrollablePane, ScrollablePane, EditablePane, Pane, ScrollingMixin, EditingMixin, object)) def test_mro_disagreement(self): # Testing error messages for MRO disagreement... mro_err_msg = """Cannot create a consistent method resolution order (MRO) for bases """ def raises(exc, expected, callable, *args): try: callable(*args) except exc, msg: # the exact msg is generally considered an impl detail if test_support.check_impl_detail(): if not str(msg).startswith(expected): self.fail("Message %r, expected %r" % (str(msg), expected)) else: self.fail("Expected %s" % exc) class A(object): pass class B(A): pass class C(object): pass # Test some very simple errors raises(TypeError, "duplicate base class A", type, "X", (A, A), {}) raises(TypeError, mro_err_msg, type, "X", (A, B), {}) raises(TypeError, mro_err_msg, type, "X", (A, C, B), {}) # Test a slightly more complex error class GridLayout(object): pass class HorizontalGrid(GridLayout): pass class VerticalGrid(GridLayout): pass class HVGrid(HorizontalGrid, VerticalGrid): pass class VHGrid(VerticalGrid, HorizontalGrid): pass raises(TypeError, mro_err_msg, type, "ConfusedGrid", (HVGrid, VHGrid), {}) def test_object_class(self): # Testing object class... 
a = object() self.assertEqual(a.__class__, object) self.assertEqual(type(a), object) b = object() self.assertNotEqual(a, b) self.assertFalse(hasattr(a, "foo")) try: a.foo = 12 except (AttributeError, TypeError): pass else: self.fail("object() should not allow setting a foo attribute") self.assertFalse(hasattr(object(), "__dict__")) class Cdict(object): pass x = Cdict() self.assertEqual(x.__dict__, {}) x.foo = 1 self.assertEqual(x.foo, 1) self.assertEqual(x.__dict__, {'foo': 1}) def test_slots(self): # Testing __slots__... class C0(object): __slots__ = [] x = C0() self.assertFalse(hasattr(x, "__dict__")) self.assertFalse(hasattr(x, "foo")) class C1(object): __slots__ = ['a'] x = C1() self.assertFalse(hasattr(x, "__dict__")) self.assertFalse(hasattr(x, "a")) x.a = 1 self.assertEqual(x.a, 1) x.a = None self.assertEqual(x.a, None) del x.a self.assertFalse(hasattr(x, "a")) class C3(object): __slots__ = ['a', 'b', 'c'] x = C3() self.assertFalse(hasattr(x, "__dict__")) self.assertFalse(hasattr(x, 'a')) self.assertFalse(hasattr(x, 'b')) self.assertFalse(hasattr(x, 'c')) x.a = 1 x.b = 2 x.c = 3 self.assertEqual(x.a, 1) self.assertEqual(x.b, 2) self.assertEqual(x.c, 3) class C4(object): """Validate name mangling""" __slots__ = ['__a'] def __init__(self, value): self.__a = value def get(self): return self.__a x = C4(5) self.assertFalse(hasattr(x, '__dict__')) self.assertFalse(hasattr(x, '__a')) self.assertEqual(x.get(), 5) try: x.__a = 6 except AttributeError: pass else: self.fail("Double underscored names not mangled") # Make sure slot names are proper identifiers try: class C(object): __slots__ = [None] except TypeError: pass else: self.fail("[None] slots not caught") try: class C(object): __slots__ = ["foo bar"] except TypeError: pass else: self.fail("['foo bar'] slots not caught") try: class C(object): __slots__ = ["foo\0bar"] except TypeError: pass else: self.fail("['foo\\0bar'] slots not caught") try: class C(object): __slots__ = ["1"] except TypeError: pass else: 
self.fail("['1'] slots not caught") try: class C(object): __slots__ = [""] except TypeError: pass else: self.fail("[''] slots not caught") class C(object): __slots__ = ["a", "a_b", "_a", "A0123456789Z"] # XXX(nnorwitz): was there supposed to be something tested # from the class above? # Test a single string is not expanded as a sequence. class C(object): __slots__ = "abc" c = C() c.abc = 5 self.assertEqual(c.abc, 5) # Test unicode slot names try: unicode except NameError: pass else: # Test a single unicode string is not expanded as a sequence. class C(object): __slots__ = unicode("abc") c = C() c.abc = 5 self.assertEqual(c.abc, 5) # _unicode_to_string used to modify slots in certain circumstances slots = (unicode("foo"), unicode("bar")) class C(object): __slots__ = slots x = C() x.foo = 5 self.assertEqual(x.foo, 5) self.assertEqual(type(slots[0]), unicode) # this used to leak references try: class C(object): __slots__ = [unichr(128)] except (TypeError, UnicodeEncodeError): pass else: self.fail("[unichr(128)] slots not caught") # Test leaks class Counted(object): counter = 0 # counts the number of instances alive def __init__(self): Counted.counter += 1 def __del__(self): Counted.counter -= 1 class C(object): __slots__ = ['a', 'b', 'c'] x = C() x.a = Counted() x.b = Counted() x.c = Counted() self.assertEqual(Counted.counter, 3) del x test_support.gc_collect() self.assertEqual(Counted.counter, 0) class D(C): pass x = D() x.a = Counted() x.z = Counted() self.assertEqual(Counted.counter, 2) del x test_support.gc_collect() self.assertEqual(Counted.counter, 0) class E(D): __slots__ = ['e'] x = E() x.a = Counted() x.z = Counted() x.e = Counted() self.assertEqual(Counted.counter, 3) del x test_support.gc_collect() self.assertEqual(Counted.counter, 0) # Test cyclical leaks [SF bug 519621] class F(object): __slots__ = ['a', 'b'] s = F() s.a = [Counted(), s] self.assertEqual(Counted.counter, 1) s = None test_support.gc_collect() self.assertEqual(Counted.counter, 0) # Test 
lookup leaks [SF bug 572567] import gc if hasattr(gc, 'get_objects'): class G(object): def __cmp__(self, other): return 0 __hash__ = None # Silence Py3k warning g = G() orig_objects = len(gc.get_objects()) for i in xrange(10): g==g new_objects = len(gc.get_objects()) self.assertEqual(orig_objects, new_objects) class H(object): __slots__ = ['a', 'b'] def __init__(self): self.a = 1 self.b = 2 def __del__(self_): self.assertEqual(self_.a, 1) self.assertEqual(self_.b, 2) with test_support.captured_output('stderr') as s: h = H() del h self.assertEqual(s.getvalue(), '') class X(object): __slots__ = "a" with self.assertRaises(AttributeError): del X().a def test_slots_special(self): # Testing __dict__ and __weakref__ in __slots__... class D(object): __slots__ = ["__dict__"] a = D() self.assertTrue(hasattr(a, "__dict__")) self.assertFalse(hasattr(a, "__weakref__")) a.foo = 42 self.assertEqual(a.__dict__, {"foo": 42}) class W(object): __slots__ = ["__weakref__"] a = W() self.assertTrue(hasattr(a, "__weakref__")) self.assertFalse(hasattr(a, "__dict__")) try: a.foo = 42 except AttributeError: pass else: self.fail("shouldn't be allowed to set a.foo") class C1(W, D): __slots__ = [] a = C1() self.assertTrue(hasattr(a, "__dict__")) self.assertTrue(hasattr(a, "__weakref__")) a.foo = 42 self.assertEqual(a.__dict__, {"foo": 42}) class C2(D, W): __slots__ = [] a = C2() self.assertTrue(hasattr(a, "__dict__")) self.assertTrue(hasattr(a, "__weakref__")) a.foo = 42 self.assertEqual(a.__dict__, {"foo": 42}) def test_slots_descriptor(self): # Issue2115: slot descriptors did not correctly check # the type of the given object import abc class MyABC: __metaclass__ = abc.ABCMeta __slots__ = "a" class Unrelated(object): pass MyABC.register(Unrelated) u = Unrelated() self.assertIsInstance(u, MyABC) # This used to crash self.assertRaises(TypeError, MyABC.a.__set__, u, 3) def test_metaclass_cmp(self): # See bug 7491. 
class M(type): def __cmp__(self, other): return -1 class X(object): __metaclass__ = M self.assertTrue(X < M) def test_dynamics(self): # Testing class attribute propagation... class D(object): pass class E(D): pass class F(D): pass D.foo = 1 self.assertEqual(D.foo, 1) # Test that dynamic attributes are inherited self.assertEqual(E.foo, 1) self.assertEqual(F.foo, 1) # Test dynamic instances class C(object): pass a = C() self.assertFalse(hasattr(a, "foobar")) C.foobar = 2 self.assertEqual(a.foobar, 2) C.method = lambda self: 42 self.assertEqual(a.method(), 42) C.__repr__ = lambda self: "C()" self.assertEqual(repr(a), "C()") C.__int__ = lambda self: 100 self.assertEqual(int(a), 100) self.assertEqual(a.foobar, 2) self.assertFalse(hasattr(a, "spam")) def mygetattr(self, name): if name == "spam": return "spam" raise AttributeError C.__getattr__ = mygetattr self.assertEqual(a.spam, "spam") a.new = 12 self.assertEqual(a.new, 12) def mysetattr(self, name, value): if name == "spam": raise AttributeError return object.__setattr__(self, name, value) C.__setattr__ = mysetattr try: a.spam = "not spam" except AttributeError: pass else: self.fail("expected AttributeError") self.assertEqual(a.spam, "spam") class D(C): pass d = D() d.foo = 1 self.assertEqual(d.foo, 1) # Test handling of int*seq and seq*int class I(int): pass self.assertEqual("a"*I(2), "aa") self.assertEqual(I(2)*"a", "aa") self.assertEqual(2*I(3), 6) self.assertEqual(I(3)*2, 6) self.assertEqual(I(3)*I(2), 6) # Test handling of long*seq and seq*long class L(long): pass self.assertEqual("a"*L(2L), "aa") self.assertEqual(L(2L)*"a", "aa") self.assertEqual(2*L(3), 6) self.assertEqual(L(3)*2, 6) self.assertEqual(L(3)*L(2), 6) # Test comparison of classes with dynamic metaclasses class dynamicmetaclass(type): pass class someclass: __metaclass__ = dynamicmetaclass self.assertNotEqual(someclass, object) def test_errors(self): # Testing errors... 
try: class C(list, dict): pass except TypeError: pass else: self.fail("inheritance from both list and dict should be illegal") try: class C(object, None): pass except TypeError: pass else: self.fail("inheritance from non-type should be illegal") class Classic: pass try: class C(type(len)): pass except TypeError: pass else: self.fail("inheritance from CFunction should be illegal") try: class C(object): __slots__ = 1 except TypeError: pass else: self.fail("__slots__ = 1 should be illegal") try: class C(object): __slots__ = [1] except TypeError: pass else: self.fail("__slots__ = [1] should be illegal") class M1(type): pass class M2(type): pass class A1(object): __metaclass__ = M1 class A2(object): __metaclass__ = M2 try: class B(A1, A2): pass except TypeError: pass else: self.fail("finding the most derived metaclass should have failed") def test_classmethods(self): # Testing class methods... class C(object): def foo(*a): return a goo = classmethod(foo) c = C() self.assertEqual(C.goo(1), (C, 1)) self.assertEqual(c.goo(1), (C, 1)) self.assertEqual(c.foo(1), (c, 1)) class D(C): pass d = D() self.assertEqual(D.goo(1), (D, 1)) self.assertEqual(d.goo(1), (D, 1)) self.assertEqual(d.foo(1), (d, 1)) self.assertEqual(D.foo(d, 1), (d, 1)) # Test for a specific crash (SF bug 528132) def f(cls, arg): return (cls, arg) ff = classmethod(f) self.assertEqual(ff.__get__(0, int)(42), (int, 42)) self.assertEqual(ff.__get__(0)(42), (int, 42)) # Test super() with classmethods (SF bug 535444) self.assertEqual(C.goo.im_self, C) self.assertEqual(D.goo.im_self, D) self.assertEqual(super(D,D).goo.im_self, D) self.assertEqual(super(D,d).goo.im_self, D) self.assertEqual(super(D,D).goo(), (D,)) self.assertEqual(super(D,d).goo(), (D,)) # Verify that a non-callable will raise meth = classmethod(1).__get__(1) self.assertRaises(TypeError, meth) # Verify that classmethod() doesn't allow keyword args try: classmethod(f, kw=1) except TypeError: pass else: self.fail("classmethod shouldn't accept keyword 
args") @test_support.impl_detail("the module 'xxsubtype' is internal") def test_classmethods_in_c(self): # Testing C-based class methods... import xxsubtype as spam a = (1, 2, 3) d = {'abc': 123} x, a1, d1 = spam.spamlist.classmeth(*a, **d) self.assertEqual(x, spam.spamlist) self.assertEqual(a, a1) self.assertEqual(d, d1) x, a1, d1 = spam.spamlist().classmeth(*a, **d) self.assertEqual(x, spam.spamlist) self.assertEqual(a, a1) self.assertEqual(d, d1) def test_staticmethods(self): # Testing static methods... class C(object): def foo(*a): return a goo = staticmethod(foo) c = C() self.assertEqual(C.goo(1), (1,)) self.assertEqual(c.goo(1), (1,)) self.assertEqual(c.foo(1), (c, 1,)) class D(C): pass d = D() self.assertEqual(D.goo(1), (1,)) self.assertEqual(d.goo(1), (1,)) self.assertEqual(d.foo(1), (d, 1)) self.assertEqual(D.foo(d, 1), (d, 1)) @test_support.impl_detail("the module 'xxsubtype' is internal") def test_staticmethods_in_c(self): # Testing C-based static methods... import xxsubtype as spam a = (1, 2, 3) d = {"abc": 123} x, a1, d1 = spam.spamlist.staticmeth(*a, **d) self.assertEqual(x, None) self.assertEqual(a, a1) self.assertEqual(d, d1) x, a1, d2 = spam.spamlist().staticmeth(*a, **d) self.assertEqual(x, None) self.assertEqual(a, a1) self.assertEqual(d, d1) def test_classic(self): # Testing classic classes... class C: def foo(*a): return a goo = classmethod(foo) c = C() self.assertEqual(C.goo(1), (C, 1)) self.assertEqual(c.goo(1), (C, 1)) self.assertEqual(c.foo(1), (c, 1)) class D(C): pass d = D() self.assertEqual(D.goo(1), (D, 1)) self.assertEqual(d.goo(1), (D, 1)) self.assertEqual(d.foo(1), (d, 1)) self.assertEqual(D.foo(d, 1), (d, 1)) class E: # *not* subclassing from C foo = C.foo self.assertEqual(E().foo, C.foo) # i.e., unbound self.assertTrue(repr(C.foo.__get__(C())).startswith("<bound method ")) def test_compattr(self): # Testing computed attributes... 
class C(object): class computed_attribute(object): def __init__(self, get, set=None, delete=None): self.__get = get self.__set = set self.__delete = delete def __get__(self, obj, type=None): return self.__get(obj) def __set__(self, obj, value): return self.__set(obj, value) def __delete__(self, obj): return self.__delete(obj) def __init__(self): self.__x = 0 def __get_x(self): x = self.__x self.__x = x+1 return x def __set_x(self, x): self.__x = x def __delete_x(self): del self.__x x = computed_attribute(__get_x, __set_x, __delete_x) a = C() self.assertEqual(a.x, 0) self.assertEqual(a.x, 1) a.x = 10 self.assertEqual(a.x, 10) self.assertEqual(a.x, 11) del a.x self.assertEqual(hasattr(a, 'x'), 0) def test_newslots(self): # Testing __new__ slot override... class C(list): def __new__(cls): self = list.__new__(cls) self.foo = 1 return self def __init__(self): self.foo = self.foo + 2 a = C() self.assertEqual(a.foo, 3) self.assertEqual(a.__class__, C) class D(C): pass b = D() self.assertEqual(b.foo, 3) self.assertEqual(b.__class__, D) def test_altmro(self): # Testing mro() and overriding it... class A(object): def f(self): return "A" class B(A): pass class C(A): def f(self): return "C" class D(B, C): pass self.assertEqual(D.mro(), [D, B, C, A, object]) self.assertEqual(D.__mro__, (D, B, C, A, object)) self.assertEqual(D().f(), "C") class PerverseMetaType(type): def mro(cls): L = type.mro(cls) L.reverse() return L class X(D,B,C,A): __metaclass__ = PerverseMetaType self.assertEqual(X.__mro__, (object, A, C, B, D, X)) self.assertEqual(X().f(), "A") try: class X(object): class __metaclass__(type): def mro(self): return [self, dict, object] # In CPython, the class creation above already raises # TypeError, as a protection against the fact that # instances of X would segfault it. In other Python # implementations it would be ok to let the class X # be created, but instead get a clean TypeError on the # __setitem__ below. 
x = object.__new__(X) x[5] = 6 except TypeError: pass else: self.fail("devious mro() return not caught") try: class X(object): class __metaclass__(type): def mro(self): return [1] except TypeError: pass else: self.fail("non-class mro() return not caught") try: class X(object): class __metaclass__(type): def mro(self): return 1 except TypeError: pass else: self.fail("non-sequence mro() return not caught") def test_overloading(self): # Testing operator overloading... class B(object): "Intermediate class because object doesn't have a __setattr__" class C(B): def __getattr__(self, name): if name == "foo": return ("getattr", name) else: raise AttributeError def __setattr__(self, name, value): if name == "foo": self.setattr = (name, value) else: return B.__setattr__(self, name, value) def __delattr__(self, name): if name == "foo": self.delattr = name else: return B.__delattr__(self, name) def __getitem__(self, key): return ("getitem", key) def __setitem__(self, key, value): self.setitem = (key, value) def __delitem__(self, key): self.delitem = key def __getslice__(self, i, j): return ("getslice", i, j) def __setslice__(self, i, j, value): self.setslice = (i, j, value) def __delslice__(self, i, j): self.delslice = (i, j) a = C() self.assertEqual(a.foo, ("getattr", "foo")) a.foo = 12 self.assertEqual(a.setattr, ("foo", 12)) del a.foo self.assertEqual(a.delattr, "foo") self.assertEqual(a[12], ("getitem", 12)) a[12] = 21 self.assertEqual(a.setitem, (12, 21)) del a[12] self.assertEqual(a.delitem, 12) self.assertEqual(a[0:10], ("getslice", 0, 10)) a[0:10] = "foo" self.assertEqual(a.setslice, (0, 10, "foo")) del a[0:10] self.assertEqual(a.delslice, (0, 10)) def test_methods(self): # Testing methods... 
class C(object): def __init__(self, x): self.x = x def foo(self): return self.x c1 = C(1) self.assertEqual(c1.foo(), 1) class D(C): boo = C.foo goo = c1.foo d2 = D(2) self.assertEqual(d2.foo(), 2) self.assertEqual(d2.boo(), 2) self.assertEqual(d2.goo(), 1) class E(object): foo = C.foo self.assertEqual(E().foo, C.foo) # i.e., unbound self.assertTrue(repr(C.foo.__get__(C(1))).startswith("<bound method ")) def test_special_method_lookup(self): # The lookup of special methods bypasses __getattr__ and # __getattribute__, but they still can be descriptors. def run_context(manager): with manager: pass def iden(self): return self def hello(self): return "hello" def empty_seq(self): return [] def zero(self): return 0 def complex_num(self): return 1j def stop(self): raise StopIteration def return_true(self, thing=None): return True def do_isinstance(obj): return isinstance(int, obj) def do_issubclass(obj): return issubclass(int, obj) def swallow(*args): pass def do_dict_missing(checker): class DictSub(checker.__class__, dict): pass self.assertEqual(DictSub()["hi"], 4) def some_number(self_, key): self.assertEqual(key, "hi") return 4 def format_impl(self, spec): return "hello" # It would be nice to have every special method tested here, but I'm # only listing the ones I can remember outside of typeobject.c, since it # does it right. 
specials = [ ("__unicode__", unicode, hello, set(), {}), ("__reversed__", reversed, empty_seq, set(), {}), ("__length_hint__", list, zero, set(), {"__iter__" : iden, "next" : stop}), ("__sizeof__", sys.getsizeof, zero, set(), {}), ("__instancecheck__", do_isinstance, return_true, set(), {}), ("__missing__", do_dict_missing, some_number, set(("__class__",)), {}), ("__subclasscheck__", do_issubclass, return_true, set(("__bases__",)), {}), ("__enter__", run_context, iden, set(), {"__exit__" : swallow}), ("__exit__", run_context, swallow, set(), {"__enter__" : iden}), ("__complex__", complex, complex_num, set(), {}), ("__format__", format, format_impl, set(), {}), ("__dir__", dir, empty_seq, set(), {}), ] class Checker(object): def __getattr__(self, attr, test=self): test.fail("__getattr__ called with {0}".format(attr)) def __getattribute__(self, attr, test=self): if attr not in ok: test.fail("__getattribute__ called with {0}".format(attr)) return object.__getattribute__(self, attr) class SpecialDescr(object): def __init__(self, impl): self.impl = impl def __get__(self, obj, owner): record.append(1) return self.impl.__get__(obj, owner) class MyException(Exception): pass class ErrDescr(object): def __get__(self, obj, owner): raise MyException for name, runner, meth_impl, ok, env in specials: class X(Checker): pass for attr, obj in env.iteritems(): setattr(X, attr, obj) setattr(X, name, meth_impl) runner(X()) record = [] class X(Checker): pass for attr, obj in env.iteritems(): setattr(X, attr, obj) setattr(X, name, SpecialDescr(meth_impl)) runner(X()) self.assertEqual(record, [1], name) class X(Checker): pass for attr, obj in env.iteritems(): setattr(X, attr, obj) setattr(X, name, ErrDescr()) try: runner(X()) except MyException: pass else: self.fail("{0!r} didn't raise".format(name)) def test_specials(self): # Testing special operators... 
# Test operators like __hash__ for which a built-in default exists # Test the default behavior for static classes class C(object): def __getitem__(self, i): if 0 <= i < 10: return i raise IndexError c1 = C() c2 = C() self.assertTrue(not not c1) # What? self.assertNotEqual(id(c1), id(c2)) hash(c1) hash(c2) self.assertEqual(cmp(c1, c2), cmp(id(c1), id(c2))) self.assertEqual(c1, c1) self.assertTrue(c1 != c2) self.assertTrue(not c1 != c1) self.assertTrue(not c1 == c2) # Note that the module name appears in str/repr, and that varies # depending on whether this test is run standalone or from a framework. self.assertTrue(str(c1).find('C object at ') >= 0) self.assertEqual(str(c1), repr(c1)) self.assertNotIn(-1, c1) for i in range(10): self.assertIn(i, c1) self.assertNotIn(10, c1) # Test the default behavior for dynamic classes class D(object): def __getitem__(self, i): if 0 <= i < 10: return i raise IndexError d1 = D() d2 = D() self.assertTrue(not not d1) self.assertNotEqual(id(d1), id(d2)) hash(d1) hash(d2) self.assertEqual(cmp(d1, d2), cmp(id(d1), id(d2))) self.assertEqual(d1, d1) self.assertNotEqual(d1, d2) self.assertTrue(not d1 != d1) self.assertTrue(not d1 == d2) # Note that the module name appears in str/repr, and that varies # depending on whether this test is run standalone or from a framework. 
self.assertTrue(str(d1).find('D object at ') >= 0) self.assertEqual(str(d1), repr(d1)) self.assertNotIn(-1, d1) for i in range(10): self.assertIn(i, d1) self.assertNotIn(10, d1) # Test overridden behavior for static classes class Proxy(object): def __init__(self, x): self.x = x def __nonzero__(self): return not not self.x def __hash__(self): return hash(self.x) def __eq__(self, other): return self.x == other def __ne__(self, other): return self.x != other def __cmp__(self, other): return cmp(self.x, other.x) def __str__(self): return "Proxy:%s" % self.x def __repr__(self): return "Proxy(%r)" % self.x def __contains__(self, value): return value in self.x p0 = Proxy(0) p1 = Proxy(1) p_1 = Proxy(-1) self.assertFalse(p0) self.assertTrue(not not p1) self.assertEqual(hash(p0), hash(0)) self.assertEqual(p0, p0) self.assertNotEqual(p0, p1) self.assertTrue(not p0 != p0) self.assertEqual(not p0, p1) self.assertEqual(cmp(p0, p1), -1) self.assertEqual(cmp(p0, p0), 0) self.assertEqual(cmp(p0, p_1), 1) self.assertEqual(str(p0), "Proxy:0") self.assertEqual(repr(p0), "Proxy(0)") p10 = Proxy(range(10)) self.assertNotIn(-1, p10) for i in range(10): self.assertIn(i, p10) self.assertNotIn(10, p10) # Test overridden behavior for dynamic classes class DProxy(object): def __init__(self, x): self.x = x def __nonzero__(self): return not not self.x def __hash__(self): return hash(self.x) def __eq__(self, other): return self.x == other def __ne__(self, other): return self.x != other def __cmp__(self, other): return cmp(self.x, other.x) def __str__(self): return "DProxy:%s" % self.x def __repr__(self): return "DProxy(%r)" % self.x def __contains__(self, value): return value in self.x p0 = DProxy(0) p1 = DProxy(1) p_1 = DProxy(-1) self.assertFalse(p0) self.assertTrue(not not p1) self.assertEqual(hash(p0), hash(0)) self.assertEqual(p0, p0) self.assertNotEqual(p0, p1) self.assertNotEqual(not p0, p0) self.assertEqual(not p0, p1) self.assertEqual(cmp(p0, p1), -1) self.assertEqual(cmp(p0, p0), 0) 
self.assertEqual(cmp(p0, p_1), 1) self.assertEqual(str(p0), "DProxy:0") self.assertEqual(repr(p0), "DProxy(0)") p10 = DProxy(range(10)) self.assertNotIn(-1, p10) for i in range(10): self.assertIn(i, p10) self.assertNotIn(10, p10) # Safety test for __cmp__ def unsafecmp(a, b): if not hasattr(a, '__cmp__'): return # some types don't have a __cmp__ any more (so the # test doesn't make sense any more), or maybe they # never had a __cmp__ at all, e.g. in PyPy try: a.__class__.__cmp__(a, b) except TypeError: pass else: self.fail("shouldn't allow %s.__cmp__(%r, %r)" % ( a.__class__, a, b)) unsafecmp(u"123", "123") unsafecmp("123", u"123") unsafecmp(1, 1.0) unsafecmp(1.0, 1) unsafecmp(1, 1L) unsafecmp(1L, 1) @test_support.impl_detail("custom logic for printing to real file objects") def test_recursions_1(self): # Testing recursion checks ... class Letter(str): def __new__(cls, letter): if letter == 'EPS': return str.__new__(cls) return str.__new__(cls, letter) def __str__(self): if not self: return 'EPS' return self # sys.stdout needs to be the original to trigger the recursion bug test_stdout = sys.stdout sys.stdout = test_support.get_original_stdout() try: # nothing should actually be printed, this should raise an exception print Letter('w') except RuntimeError: pass else: self.fail("expected a RuntimeError for print recursion") finally: sys.stdout = test_stdout def test_recursions_2(self): # Bug #1202533. class A(object): pass A.__mul__ = types.MethodType(lambda self, x: self * x, None, A) try: A()*2 except RuntimeError: pass else: self.fail("expected a RuntimeError") def test_weakrefs(self): # Testing weak references... 
import weakref class C(object): pass c = C() r = weakref.ref(c) self.assertEqual(r(), c) del c test_support.gc_collect() self.assertEqual(r(), None) del r class NoWeak(object): __slots__ = ['foo'] no = NoWeak() try: weakref.ref(no) except TypeError, msg: self.assertTrue(str(msg).find("weak reference") >= 0) else: self.fail("weakref.ref(no) should be illegal") class Weak(object): __slots__ = ['foo', '__weakref__'] yes = Weak() r = weakref.ref(yes) self.assertEqual(r(), yes) del yes test_support.gc_collect() self.assertEqual(r(), None) del r def test_properties(self): # Testing property... class C(object): def getx(self): return self.__x def setx(self, value): self.__x = value def delx(self): del self.__x x = property(getx, setx, delx, doc="I'm the x property.") a = C() self.assertFalse(hasattr(a, "x")) a.x = 42 self.assertEqual(a._C__x, 42) self.assertEqual(a.x, 42) del a.x self.assertFalse(hasattr(a, "x")) self.assertFalse(hasattr(a, "_C__x")) C.x.__set__(a, 100) self.assertEqual(C.x.__get__(a), 100) C.x.__delete__(a) self.assertFalse(hasattr(a, "x")) raw = C.__dict__['x'] self.assertIsInstance(raw, property) attrs = dir(raw) self.assertIn("__doc__", attrs) self.assertIn("fget", attrs) self.assertIn("fset", attrs) self.assertIn("fdel", attrs) self.assertEqual(raw.__doc__, "I'm the x property.") self.assertTrue(raw.fget is C.__dict__['getx']) self.assertTrue(raw.fset is C.__dict__['setx']) self.assertTrue(raw.fdel is C.__dict__['delx']) for attr in "__doc__", "fget", "fset", "fdel": try: setattr(raw, attr, 42) except TypeError, msg: if str(msg).find('readonly') < 0: self.fail("when setting readonly attr %r on a property, " "got unexpected TypeError msg %r" % (attr, str(msg))) else: self.fail("expected TypeError from trying to set readonly %r " "attr on a property" % attr) class D(object): __getitem__ = property(lambda s: 1/0) d = D() try: for i in d: str(i) except ZeroDivisionError: pass else: self.fail("expected ZeroDivisionError from bad property") 
@unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def test_properties_doc_attrib(self): class E(object): def getter(self): "getter method" return 0 def setter(self_, value): "setter method" pass prop = property(getter) self.assertEqual(prop.__doc__, "getter method") prop2 = property(fset=setter) self.assertEqual(prop2.__doc__, None) def test_testcapi_no_segfault(self): # this segfaulted in 2.5b2 try: import _testcapi except ImportError: pass else: class X(object): p = property(_testcapi.test_with_docstring) def test_properties_plus(self): class C(object): foo = property(doc="hello") @foo.getter def foo(self): return self._foo @foo.setter def foo(self, value): self._foo = abs(value) @foo.deleter def foo(self): del self._foo c = C() self.assertEqual(C.foo.__doc__, "hello") self.assertFalse(hasattr(c, "foo")) c.foo = -42 self.assertTrue(hasattr(c, '_foo')) self.assertEqual(c._foo, 42) self.assertEqual(c.foo, 42) del c.foo self.assertFalse(hasattr(c, '_foo')) self.assertFalse(hasattr(c, "foo")) class D(C): @C.foo.deleter def foo(self): try: del self._foo except AttributeError: pass d = D() d.foo = 24 self.assertEqual(d.foo, 24) del d.foo del d.foo class E(object): @property def foo(self): return self._foo @foo.setter def foo(self, value): raise RuntimeError @foo.setter def foo(self, value): self._foo = abs(value) @foo.deleter def foo(self, value=None): del self._foo e = E() e.foo = -42 self.assertEqual(e.foo, 42) del e.foo class F(E): @E.foo.deleter def foo(self): del self._foo @foo.setter def foo(self, value): self._foo = max(0, value) f = F() f.foo = -10 self.assertEqual(f.foo, 0) del f.foo def test_dict_constructors(self): # Testing dict constructor ... 
d = dict() self.assertEqual(d, {}) d = dict({}) self.assertEqual(d, {}) d = dict({1: 2, 'a': 'b'}) self.assertEqual(d, {1: 2, 'a': 'b'}) self.assertEqual(d, dict(d.items())) self.assertEqual(d, dict(d.iteritems())) d = dict({'one':1, 'two':2}) self.assertEqual(d, dict(one=1, two=2)) self.assertEqual(d, dict(**d)) self.assertEqual(d, dict({"one": 1}, two=2)) self.assertEqual(d, dict([("two", 2)], one=1)) self.assertEqual(d, dict([("one", 100), ("two", 200)], **d)) self.assertEqual(d, dict(**d)) for badarg in 0, 0L, 0j, "0", [0], (0,): try: dict(badarg) except TypeError: pass except ValueError: if badarg == "0": # It's a sequence, and its elements are also sequences (gotta # love strings <wink>), but they aren't of length 2, so this # one seemed better as a ValueError than a TypeError. pass else: self.fail("no TypeError from dict(%r)" % badarg) else: self.fail("no TypeError from dict(%r)" % badarg) try: dict({}, {}) except TypeError: pass else: self.fail("no TypeError from dict({}, {})") class Mapping: # Lacks a .keys() method; will be added later. dict = {1:2, 3:4, 'a':1j} try: dict(Mapping()) except TypeError: pass else: self.fail("no TypeError from dict(incomplete mapping)") Mapping.keys = lambda self: self.dict.keys() Mapping.__getitem__ = lambda self, i: self.dict[i] d = dict(Mapping()) self.assertEqual(d, Mapping.dict) # Init from sequence of iterable objects, each producing a 2-sequence. class AddressBookEntry: def __init__(self, first, last): self.first = first self.last = last def __iter__(self): return iter([self.first, self.last]) d = dict([AddressBookEntry('Tim', 'Warsaw'), AddressBookEntry('Barry', 'Peters'), AddressBookEntry('Tim', 'Peters'), AddressBookEntry('Barry', 'Warsaw')]) self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'}) d = dict(zip(range(4), range(1, 5))) self.assertEqual(d, dict([(i, i+1) for i in range(4)])) # Bad sequence lengths. 
for bad in [('tooshort',)], [('too', 'long', 'by 1')]: try: dict(bad) except ValueError: pass else: self.fail("no ValueError from dict(%r)" % bad) def test_dir(self): # Testing dir() ... junk = 12 self.assertEqual(dir(), ['junk', 'self']) del junk # Just make sure these don't blow up! for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, self.test_dir: dir(arg) # Try classic classes. class C: Cdata = 1 def Cmethod(self): pass cstuff = ['Cdata', 'Cmethod', '__doc__', '__module__'] self.assertEqual(dir(C), cstuff) self.assertIn('im_self', dir(C.Cmethod)) c = C() # c.__doc__ is an odd thing to see here; ditto c.__module__. self.assertEqual(dir(c), cstuff) c.cdata = 2 c.cmethod = lambda self: 0 self.assertEqual(dir(c), cstuff + ['cdata', 'cmethod']) self.assertIn('im_self', dir(c.Cmethod)) class A(C): Adata = 1 def Amethod(self): pass astuff = ['Adata', 'Amethod'] + cstuff self.assertEqual(dir(A), astuff) self.assertIn('im_self', dir(A.Amethod)) a = A() self.assertEqual(dir(a), astuff) self.assertIn('im_self', dir(a.Amethod)) a.adata = 42 a.amethod = lambda self: 3 self.assertEqual(dir(a), astuff + ['adata', 'amethod']) # The same, but with new-style classes. Since these have object as a # base class, a lot more gets sucked in. 
        # Filter out the object-inherited dunder names so the checks below can
        # compare against just the user-defined attributes.
        def interesting(strings):
            return [s for s in strings if not s.startswith('_')]

        class C(object):
            Cdata = 1
            def Cmethod(self): pass

        cstuff = ['Cdata', 'Cmethod']
        self.assertEqual(interesting(dir(C)), cstuff)

        c = C()
        self.assertEqual(interesting(dir(c)), cstuff)
        self.assertIn('im_self', dir(C.Cmethod))

        c.cdata = 2
        c.cmethod = lambda self: 0
        self.assertEqual(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
        self.assertIn('im_self', dir(c.Cmethod))

        class A(C):
            Adata = 1
            def Amethod(self): pass

        astuff = ['Adata', 'Amethod'] + cstuff
        self.assertEqual(interesting(dir(A)), astuff)
        self.assertIn('im_self', dir(A.Amethod))
        a = A()
        self.assertEqual(interesting(dir(a)), astuff)
        a.adata = 42
        a.amethod = lambda self: 3
        self.assertEqual(interesting(dir(a)), astuff + ['adata', 'amethod'])
        self.assertIn('im_self', dir(a.Amethod))

        # Try a module subclass.
        class M(type(sys)):
            pass
        minstance = M("m")
        minstance.b = 2
        minstance.a = 1
        names = [x for x in dir(minstance) if x not in ["__name__", "__doc__"]]
        self.assertEqual(names, ['a', 'b'])

        # A module subclass whose __dict__ property returns a non-dict;
        # dir() may raise TypeError on it, but must not crash.
        class M2(M):
            def getdict(self):
                return "Not a dict!"
            __dict__ = property(getdict)

        m2instance = M2("m2")
        m2instance.b = 2
        m2instance.a = 1
        self.assertEqual(m2instance.__dict__, "Not a dict!")
        try:
            dir(m2instance)
        except TypeError:
            pass

        # Two essentially featureless objects, just inheriting stuff from
        # object.
self.assertEqual(dir(NotImplemented), dir(Ellipsis)) if test_support.check_impl_detail(): # None differs in PyPy: it has a __nonzero__ self.assertEqual(dir(None), dir(Ellipsis)) # Nasty test case for proxied objects class Wrapper(object): def __init__(self, obj): self.__obj = obj def __repr__(self): return "Wrapper(%s)" % repr(self.__obj) def __getitem__(self, key): return Wrapper(self.__obj[key]) def __len__(self): return len(self.__obj) def __getattr__(self, name): return Wrapper(getattr(self.__obj, name)) class C(object): def __getclass(self): return Wrapper(type(self)) __class__ = property(__getclass) dir(C()) # This used to segfault def test_supers(self): # Testing super... class A(object): def meth(self, a): return "A(%r)" % a self.assertEqual(A().meth(1), "A(1)") class B(A): def __init__(self): self.__super = super(B, self) def meth(self, a): return "B(%r)" % a + self.__super.meth(a) self.assertEqual(B().meth(2), "B(2)A(2)") class C(A): def meth(self, a): return "C(%r)" % a + self.__super.meth(a) C._C__super = super(C) self.assertEqual(C().meth(3), "C(3)A(3)") class D(C, B): def meth(self, a): return "D(%r)" % a + super(D, self).meth(a) self.assertEqual(D().meth(4), "D(4)C(4)B(4)A(4)") # Test for subclassing super class mysuper(super): def __init__(self, *args): return super(mysuper, self).__init__(*args) class E(D): def meth(self, a): return "E(%r)" % a + mysuper(E, self).meth(a) self.assertEqual(E().meth(5), "E(5)D(5)C(5)B(5)A(5)") class F(E): def meth(self, a): s = self.__super # == mysuper(F, self) return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a) F._F__super = mysuper(F) self.assertEqual(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)") # Make sure certain errors are raised try: super(D, 42) except TypeError: pass else: self.fail("shouldn't allow super(D, 42)") try: super(D, C()) except TypeError: pass else: self.fail("shouldn't allow super(D, C())") try: super(D).__get__(12) except TypeError: pass else: self.fail("shouldn't allow 
super(D).__get__(12)") try: super(D).__get__(C()) except TypeError: pass else: self.fail("shouldn't allow super(D).__get__(C())") # Make sure data descriptors can be overridden and accessed via super # (new feature in Python 2.3) class DDbase(object): def getx(self): return 42 x = property(getx) class DDsub(DDbase): def getx(self): return "hello" x = property(getx) dd = DDsub() self.assertEqual(dd.x, "hello") self.assertEqual(super(DDsub, dd).x, 42) # Ensure that super() lookup of descriptor from classmethod # works (SF ID# 743627) class Base(object): aProp = property(lambda self: "foo") class Sub(Base): @classmethod def test(klass): return super(Sub,klass).aProp self.assertEqual(Sub.test(), Base.aProp) # Verify that super() doesn't allow keyword args try: super(Base, kw=1) except TypeError: pass else: self.assertEqual("super shouldn't accept keyword args") def test_basic_inheritance(self): # Testing inheritance from basic types... class hexint(int): def __repr__(self): return hex(self) def __add__(self, other): return hexint(int.__add__(self, other)) # (Note that overriding __radd__ doesn't work, # because the int type gets first dibs.) 
        # hexint.__add__ preserves the subclass; other int ops fall back to int.
        self.assertEqual(repr(hexint(7) + 9), "0x10")
        self.assertEqual(repr(hexint(1000) + 7), "0x3ef")
        a = hexint(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assertTrue(int(a).__class__ is int)
        self.assertEqual(hash(a), hash(12345))
        self.assertTrue((+a).__class__ is int)
        self.assertTrue((a >> 0).__class__ is int)
        self.assertTrue((a << 0).__class__ is int)
        self.assertTrue((hexint(0) << 12).__class__ is int)
        self.assertTrue((hexint(0) >> 12).__class__ is int)

        # long subclass with __slots__, an octal str(), and subclass-preserving
        # addition via super().
        class octlong(long):
            __slots__ = []
            def __str__(self):
                s = oct(self)
                if s[-1] == 'L':
                    s = s[:-1]
                return s
            def __add__(self, other):
                return self.__class__(super(octlong, self).__add__(other))
            __radd__ = __add__
        self.assertEqual(str(octlong(3) + 5), "010")
        # (Note that overriding __radd__ here only seems to work
        # because the example uses a short int left argument.)
        self.assertEqual(str(5 + octlong(3000)), "05675")
        a = octlong(12345)
        self.assertEqual(a, 12345L)
        self.assertEqual(long(a), 12345L)
        self.assertEqual(hash(a), hash(12345L))
        self.assertTrue(long(a).__class__ is long)
        # All inherited long operations must return plain long, not octlong.
        self.assertTrue((+a).__class__ is long)
        self.assertTrue((-a).__class__ is long)
        self.assertTrue((-octlong(0)).__class__ is long)
        self.assertTrue((a >> 0).__class__ is long)
        self.assertTrue((a << 0).__class__ is long)
        self.assertTrue((a - 0).__class__ is long)
        self.assertTrue((a * 1).__class__ is long)
        self.assertTrue((a ** 1).__class__ is long)
        self.assertTrue((a // 1).__class__ is long)
        self.assertTrue((1 * a).__class__ is long)
        self.assertTrue((a | 0).__class__ is long)
        self.assertTrue((a ^ 0).__class__ is long)
        self.assertTrue((a & -1L).__class__ is long)
        self.assertTrue((octlong(0) << 12).__class__ is long)
        self.assertTrue((octlong(0) >> 12).__class__ is long)
        self.assertTrue(abs(octlong(0)).__class__ is long)

        # Because octlong overrides __add__, we can't check the absence of +0
        # optimizations using octlong.
        # Plain long subclass: arithmetic results degrade to plain long.
        class longclone(long):
            pass
        a = longclone(1)
        self.assertTrue((a + 0).__class__ is long)
        self.assertTrue((0 + a).__class__ is long)

        # Check that negative clones don't segfault
        a = longclone(-1)
        self.assertEqual(a.__dict__, {})
        self.assertEqual(long(a), -1)
        # self.assertTrue PyNumber_Long() copies the sign bit

        # float subclass carrying extra state in __slots__.
        class precfloat(float):
            __slots__ = ['prec']
            def __init__(self, value=0.0, prec=12):
                self.prec = int(prec)
            def __repr__(self):
                return "%.*g" % (self.prec, self)
        self.assertEqual(repr(precfloat(1.1)), "1.1")
        a = precfloat(12345)
        self.assertEqual(a, 12345.0)
        self.assertEqual(float(a), 12345.0)
        self.assertTrue(float(a).__class__ is float)
        self.assertEqual(hash(a), hash(12345.0))
        self.assertTrue((+a).__class__ is float)

        # complex subclass with an unconventional repr.
        class madcomplex(complex):
            def __repr__(self):
                return "%.17gj%+.17g" % (self.imag, self.real)
        a = madcomplex(-3, 4)
        self.assertEqual(repr(a), "4j-3")
        base = complex(-3, 4)
        self.assertEqual(base.__class__, complex)
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        a = madcomplex(a)  # just trying another form of the constructor
        self.assertEqual(repr(a), "4j-3")
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        self.assertEqual(hash(a), hash(base))
        # Arithmetic on the subclass returns plain complex.
        self.assertEqual((+a).__class__, complex)
        self.assertEqual((a + 0).__class__, complex)
        self.assertEqual(a + 0, base)
        self.assertEqual((a - 0).__class__, complex)
        self.assertEqual(a - 0, base)
        self.assertEqual((a * 1).__class__, complex)
        self.assertEqual(a * 1, base)
        self.assertEqual((a / 1).__class__, complex)
        self.assertEqual(a / 1, base)

        # tuple subclass with a cached reverse() helper.
        class madtuple(tuple):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__(L)
                return self._rev
        a = madtuple((1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a, (1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
        self.assertEqual(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
        # Double reverse round-trips for a range of sizes.
        for i in range(512):
            t = madtuple(range(i))
            u = t.rev()
            v = u.rev()
            self.assertEqual(v, t)
        a = madtuple((1,2,3,4,5))
        self.assertEqual(tuple(a), (1,2,3,4,5))
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertEqual(hash(a), hash((1,2,3,4,5)))
        # Slicing/repetition/concatenation return plain tuple.
        self.assertTrue(a[:].__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a + ()).__class__ is tuple)
        a = madtuple(())
        self.assertEqual(tuple(a), ())
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertTrue((a + a).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 2).__class__ is tuple)
        self.assertTrue(a[:].__class__ is tuple)

        # str subclass with the same cached-reverse pattern.
        class madstring(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        s = madstring("abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s, "abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
        self.assertEqual(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
        for i in range(256):
            s = madstring("".join(map(chr, range(i))))
            t = s.rev()
            u = t.rev()
            self.assertEqual(u, s)
        s = madstring("12345")
        self.assertEqual(str(s), "12345")
        self.assertTrue(str(s).__class__ is str)

        # Subclass instances hash/compare like the base value, including
        # strings with embedded NUL bytes.
        base = "\x00" * 5
        s = madstring(base)
        self.assertEqual(s, base)
        self.assertEqual(str(s), base)
        self.assertTrue(str(s).__class__ is str)
        self.assertEqual(hash(s), hash(base))
        self.assertEqual({s: 1}[base], 1)
        self.assertEqual({base: 1}[s], 1)
        # String operations return plain str, with the expected values.
        self.assertTrue((s + "").__class__ is str)
        self.assertEqual(s + "", base)
        self.assertTrue(("" + s).__class__ is str)
        self.assertEqual("" + s, base)
        self.assertTrue((s * 0).__class__ is str)
        self.assertEqual(s * 0, "")
        self.assertTrue((s * 1).__class__ is str)
        self.assertEqual(s * 1, base)
        self.assertTrue((s * 2).__class__ is str)
        self.assertEqual(s * 2, base + base)
        # str methods on a str subclass must return plain str.
        self.assertTrue(s[:].__class__ is str)
        self.assertEqual(s[:], base)
        self.assertTrue(s[0:0].__class__ is str)
        self.assertEqual(s[0:0], "")
        self.assertTrue(s.strip().__class__ is str)
        self.assertEqual(s.strip(), base)
        self.assertTrue(s.lstrip().__class__ is str)
        self.assertEqual(s.lstrip(), base)
        self.assertTrue(s.rstrip().__class__ is str)
        self.assertEqual(s.rstrip(), base)
        # Identity translation table: translate() must be a no-op.
        identitytab = ''.join([chr(i) for i in range(256)])
        self.assertTrue(s.translate(identitytab).__class__ is str)
        self.assertEqual(s.translate(identitytab), base)
        self.assertTrue(s.translate(identitytab, "x").__class__ is str)
        self.assertEqual(s.translate(identitytab, "x"), base)
        # base is all NULs, so deleting "\x00" empties the string.
        self.assertEqual(s.translate(identitytab, "\x00"), "")
        self.assertTrue(s.replace("x", "x").__class__ is str)
        self.assertEqual(s.replace("x", "x"), base)
        self.assertTrue(s.ljust(len(s)).__class__ is str)
        self.assertEqual(s.ljust(len(s)), base)
        self.assertTrue(s.rjust(len(s)).__class__ is str)
        self.assertEqual(s.rjust(len(s)), base)
        self.assertTrue(s.center(len(s)).__class__ is str)
        self.assertEqual(s.center(len(s)), base)
        self.assertTrue(s.lower().__class__ is str)
        self.assertEqual(s.lower(), base)

        # unicode subclass with the cached-reverse pattern.
        class madunicode(unicode):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__(u"".join(L))
                return self._rev
        u = madunicode("ABCDEF")
        self.assertEqual(u, u"ABCDEF")
        self.assertEqual(u.rev(), madunicode(u"FEDCBA"))
        self.assertEqual(u.rev().rev(), madunicode(u"ABCDEF"))
        base = u"12345"
        u = madunicode(base)
        self.assertEqual(unicode(u), base)
        self.assertTrue(unicode(u).__class__ is unicode)
        self.assertEqual(hash(u), hash(base))
        self.assertEqual({u: 1}[base], 1)
        self.assertEqual({base: 1}[u], 1)
        # unicode methods on the subclass must return plain unicode.
        self.assertTrue(u.strip().__class__ is unicode)
        self.assertEqual(u.strip(), base)
        self.assertTrue(u.lstrip().__class__ is unicode)
        self.assertEqual(u.lstrip(), base)
        self.assertTrue(u.rstrip().__class__ is unicode)
        self.assertEqual(u.rstrip(), base)
        # Remaining unicode methods: all must return plain unicode with the
        # original value.
        self.assertTrue(u.replace(u"x", u"x").__class__ is unicode)
        self.assertEqual(u.replace(u"x", u"x"), base)
        self.assertTrue(u.replace(u"xy", u"xy").__class__ is unicode)
        self.assertEqual(u.replace(u"xy", u"xy"), base)
        self.assertTrue(u.center(len(u)).__class__ is unicode)
        self.assertEqual(u.center(len(u)), base)
        self.assertTrue(u.ljust(len(u)).__class__ is unicode)
        self.assertEqual(u.ljust(len(u)), base)
        self.assertTrue(u.rjust(len(u)).__class__ is unicode)
        self.assertEqual(u.rjust(len(u)), base)
        self.assertTrue(u.lower().__class__ is unicode)
        self.assertEqual(u.lower(), base)
        self.assertTrue(u.upper().__class__ is unicode)
        self.assertEqual(u.upper(), base)
        self.assertTrue(u.capitalize().__class__ is unicode)
        self.assertEqual(u.capitalize(), base)
        self.assertTrue(u.title().__class__ is unicode)
        self.assertEqual(u.title(), base)
        self.assertTrue((u + u"").__class__ is unicode)
        self.assertEqual(u + u"", base)
        self.assertTrue((u"" + u).__class__ is unicode)
        self.assertEqual(u"" + u, base)
        self.assertTrue((u * 0).__class__ is unicode)
        self.assertEqual(u * 0, u"")
        self.assertTrue((u * 1).__class__ is unicode)
        self.assertEqual(u * 1, base)
        self.assertTrue((u * 2).__class__ is unicode)
        self.assertEqual(u * 2, base + base)
        self.assertTrue(u[:].__class__ is unicode)
        self.assertEqual(u[:], base)
        self.assertTrue(u[0:0].__class__ is unicode)
        self.assertEqual(u[0:0], u"")

        # list subclass: all mutating list operations must keep working.
        class sublist(list):
            pass
        a = sublist(range(5))
        self.assertEqual(a, range(5))
        a.append("hello")
        self.assertEqual(a, range(5) + ["hello"])
        a[5] = 5
        self.assertEqual(a, range(6))
        a.extend(range(6, 20))
        self.assertEqual(a, range(20))
        a[-5:] = []
        self.assertEqual(a, range(15))
        del a[10:15]
        self.assertEqual(len(a), 10)
        self.assertEqual(a, range(10))
        self.assertEqual(list(a), range(10))
        self.assertEqual(a[0], 0)
        self.assertEqual(a[9], 9)
        self.assertEqual(a[-10], 0)
        self.assertEqual(a[-1], 9)
        self.assertEqual(a[:5], range(5))

        class CountedInput(file):
            """Counts lines read by self.readline().

            self.lineno is the 0-based ordinal of the last line read, up to
            a maximum of one greater than the number of lines in the file.

            self.ateof is true if and only if the final "" line has been read,
            at which point self.lineno stops incrementing, and further calls
            to readline() continue to return "".
            """

            lineno = 0
            ateof = 0
            def readline(self):
                if self.ateof:
                    return ""
                s = file.readline(self)
                # Next line works too.
                # s = super(CountedInput, self).readline()
                self.lineno += 1
                if s == "":
                    self.ateof = 1
                return s

        # Write a small fixture file, then read past EOF twice; lineno must
        # stop incrementing after the final "" line.
        f = file(name=test_support.TESTFN, mode='w')
        lines = ['a\n', 'b\n', 'c\n']
        try:
            f.writelines(lines)
            f.close()
            f = CountedInput(test_support.TESTFN)
            for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
                got = f.readline()
                self.assertEqual(expected, got)
                self.assertEqual(f.lineno, i)
                self.assertEqual(f.ateof, (i > len(lines)))
            f.close()
        finally:
            try:
                f.close()
            except:
                pass
            test_support.unlink(test_support.TESTFN)

    def test_keywords(self):
        # Testing keyword args to basic type constructors ...
        self.assertEqual(int(x=1), 1)
        self.assertEqual(float(x=2), 2.0)
        self.assertEqual(long(x=3), 3L)
        self.assertEqual(complex(imag=42, real=666), complex(666, 42))
        self.assertEqual(str(object=500), '500')
        self.assertEqual(unicode(string='abc', errors='strict'), u'abc')
        self.assertEqual(tuple(sequence=range(3)), (0, 1, 2))
        self.assertEqual(list(sequence=(0, 1, 2)), range(3))
        # note: as of Python 2.3, dict() no longer has an "items" keyword arg

        # Unknown keyword arguments must raise TypeError for every constructor.
        for constructor in (int, float, long, complex, str, unicode,
                            tuple, list, file):
            try:
                constructor(bogus_keyword_arg=1)
            except TypeError:
                pass
            else:
                self.fail("expected TypeError from bogus keyword argument to %r"
                          % constructor)

    def test_str_subclass_as_dict_key(self):
        # Testing a str subclass used as dict key ..

        class cistr(str):
            """Subclass of str that computes __eq__ case-insensitively.

            Also computes a hash code of the string in canonical form.
            """

            def __init__(self, value):
                self.canonical = value.lower()
                self.hashcode = hash(self.canonical)

            def __eq__(self, other):
                if not isinstance(other, cistr):
                    other = cistr(other)
                return self.canonical == other.canonical

            def __hash__(self):
                return self.hashcode

        self.assertEqual(cistr('ABC'), 'abc')
        self.assertEqual('aBc', cistr('ABC'))
        self.assertEqual(str(cistr('ABC')), 'ABC')

        # Lookup must succeed regardless of key case.
        d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
        self.assertEqual(d[cistr('one')], 1)
        self.assertEqual(d[cistr('tWo')], 2)
        self.assertEqual(d[cistr('THrEE')], 3)
        self.assertIn(cistr('ONe'), d)
        self.assertEqual(d.get(cistr('thrEE')), 3)

    def test_classic_comparisons(self):
        # Testing classic comparisons...
        class classic:
            pass

        # The same __cmp__-based class is exercised with classic, int and
        # object bases.
        for base in (classic, int, object):
            class C(base):
                def __init__(self, value):
                    self.value = int(value)
                def __cmp__(self, other):
                    if isinstance(other, C):
                        return cmp(self.value, other.value)
                    if isinstance(other, int) or isinstance(other, long):
                        return cmp(self.value, other)
                    return NotImplemented
                __hash__ = None # Silence Py3k warning

            c1 = C(1)
            c2 = C(2)
            c3 = C(3)
            self.assertEqual(c1, 1)
            c = {1: c1, 2: c2, 3: c3}
            # __cmp__ results must agree with plain int comparisons for every
            # pairing and every comparison operator.
            for x in 1, 2, 3:
                for y in 1, 2, 3:
                    self.assertTrue(cmp(c[x], c[y]) == cmp(x, y),
                                    "x=%d, y=%d" % (x, y))
                    for op in "<", "<=", "==", "!=", ">", ">=":
                        self.assertTrue(eval("c[x] %s c[y]" % op) ==
                                        eval("x %s y" % op),
                                        "x=%d, y=%d" % (x, y))
                    self.assertTrue(cmp(c[x], y) == cmp(x, y),
                                    "x=%d, y=%d" % (x, y))
                    self.assertTrue(cmp(x, c[y]) == cmp(x, y),
                                    "x=%d, y=%d" % (x, y))

    def test_rich_comparisons(self):
        # Testing rich comparisons...
        # A plain complex subclass compares equal both ways.
        class Z(complex):
            pass
        z = Z(1)
        self.assertEqual(z, 1+0j)
        self.assertEqual(1+0j, z)

        # __eq__ with a tolerance; must be used for both operand orders.
        class ZZ(complex):
            def __eq__(self, other):
                try:
                    return abs(self - other) <= 1e-6
                except:
                    return NotImplemented
            __hash__ = None # Silence Py3k warning
        zz = ZZ(1.0000003)
        self.assertEqual(zz, 1+0j)
        self.assertEqual(1+0j, zz)

        class classic:
            pass
        for base in (classic, int, object, list):
            class C(base):
                def __init__(self, value):
                    self.value = int(value)
                # Note the parameter is self_, so `self` below is the
                # enclosing TestCase: rich comparisons must never fall back
                # to __cmp__ when all six rich methods are defined.
                def __cmp__(self_, other):
                    self.fail("shouldn't call __cmp__")
                __hash__ = None # Silence Py3k warning
                def __eq__(self, other):
                    if isinstance(other, C):
                        return self.value == other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value == other
                    return NotImplemented
                def __ne__(self, other):
                    if isinstance(other, C):
                        return self.value != other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value != other
                    return NotImplemented
                def __lt__(self, other):
                    if isinstance(other, C):
                        return self.value < other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value < other
                    return NotImplemented
                def __le__(self, other):
                    if isinstance(other, C):
                        return self.value <= other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value <= other
                    return NotImplemented
                def __gt__(self, other):
                    if isinstance(other, C):
                        return self.value > other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value > other
                    return NotImplemented
                def __ge__(self, other):
                    if isinstance(other, C):
                        return self.value >= other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value >= other
                    return NotImplemented

            c1 = C(1)
            c2 = C(2)
            c3 = C(3)
            self.assertEqual(c1, 1)
            c = {1: c1, 2: c2, 3: c3}
            # Every operator must agree with the plain int result, for
            # C-vs-C and C-vs-int operand combinations.
            for x in 1, 2, 3:
                for y in 1, 2, 3:
                    for op in "<", "<=", "==", "!=", ">", ">=":
                        self.assertTrue(eval("c[x] %s c[y]" % op) ==
                                        eval("x %s y" % op),
                                        "x=%d, y=%d" % (x, y))
                        self.assertTrue(eval("c[x] %s y" % op) ==
                                        eval("x %s y" % op),
                                        "x=%d, y=%d" % (x, y))
                        # int-vs-C order exercises the reflected methods.
                        self.assertTrue(eval("x %s c[y]" % op) ==
                                        eval("x %s y" % op),
                                        "x=%d, y=%d" % (x, y))

    def test_coercions(self):
        # Testing coercions...
        # coerce() must accept numeric subclasses in either operand position.
        class I(int):
            pass
        coerce(I(0), 0)
        coerce(0, I(0))
        class L(long):
            pass
        coerce(L(0), 0)
        coerce(L(0), 0L)
        coerce(0, L(0))
        coerce(0L, L(0))
        class F(float):
            pass
        coerce(F(0), 0)
        coerce(F(0), 0L)
        coerce(F(0), 0.)
        coerce(0, F(0))
        coerce(0L, F(0))
        coerce(0., F(0))
        class C(complex):
            pass
        coerce(C(0), 0)
        coerce(C(0), 0L)
        coerce(C(0), 0.)
        coerce(C(0), 0j)
        coerce(0, C(0))
        coerce(0L, C(0))
        coerce(0., C(0))
        coerce(0j, C(0))

    def test_descrdoc(self):
        # Testing descriptor doc strings...
        def check(descr, what):
            self.assertEqual(descr.__doc__, what)
        check(file.closed, "True if the file is closed") # getset descriptor
        check(file.name, "file name") # member descriptor

    def test_doc_descriptor(self):
        # Testing __doc__ descriptor...
        # SF bug 542984
        class DocDescr(object):
            def __get__(self, object, otype):
                if object:
                    object = object.__class__.__name__ + ' instance'
                if otype:
                    otype = otype.__name__
                return 'object=%s; type=%s' % (object, otype)
        class OldClass:
            __doc__ = DocDescr()
        class NewClass(object):
            __doc__ = DocDescr()
        # The descriptor is invoked for class and instance access alike.
        self.assertEqual(OldClass.__doc__, 'object=None; type=OldClass')
        self.assertEqual(OldClass().__doc__,
                         'object=OldClass instance; type=OldClass')
        self.assertEqual(NewClass.__doc__, 'object=None; type=NewClass')
        self.assertEqual(NewClass().__doc__,
                         'object=NewClass instance; type=NewClass')

    def test_set_class(self):
        # Testing __class__ assignment...
        # Layout-compatible classes: __class__ may be swapped freely.
        class C(object): pass
        class D(object): pass
        class E(object): pass
        class F(D, E): pass
        for cls in C, D, E, F:
            for cls2 in C, D, E, F:
                x = cls()
                x.__class__ = cls2
                self.assertTrue(x.__class__ is cls2)
                x.__class__ = cls
                self.assertTrue(x.__class__ is cls)

        # Both assigning an incompatible __class__ and deleting __class__
        # must be rejected.
        def cant(x, C):
            try:
                x.__class__ = C
            except TypeError:
                pass
            else:
                self.fail("shouldn't allow %r.__class__ = %r" % (x, C))
            try:
                delattr(x, "__class__")
            except (TypeError, AttributeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__class__" % x)

        cant(C(), list)
        cant(list(), C)
        cant(C(), 1)
        cant(C(), object)
        cant(object(), list)
        cant(list(), object)
        class Int(int): __slots__ = []
        cant(2, Int)
        cant(Int(), int)
        cant(True, int)
        cant(2, bool)
        o = object()
        cant(o, type(1))
        cant(o, type(None))
        del o

        # __slots__ layout compatibility: same slot names (in any order)
        # allow __class__ assignment; differing names/extra slots do not.
        class G(object):
            __slots__ = ["a", "b"]
        class H(object):
            __slots__ = ["b", "a"]
        try:
            unicode
        except NameError:
            class I(object):
                __slots__ = ["a", "b"]
        else:
            class I(object):
                __slots__ = [unicode("a"), unicode("b")]
        class J(object):
            __slots__ = ["c", "b"]
        class K(object):
            __slots__ = ["a", "b", "d"]
        class L(H):
            __slots__ = ["e"]
        class M(I):
            __slots__ = ["e"]
        class N(J):
            __slots__ = ["__weakref__"]
        class P(J):
            __slots__ = ["__dict__"]
        class Q(J):
            pass
        class R(J):
            __slots__ = ["__dict__", "__weakref__"]

        for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)):
            x = cls()
            x.a = 1
            x.__class__ = cls2
            self.assertTrue(x.__class__ is cls2,
                "assigning %r as __class__ for %r silently failed" % (cls2, x))
            self.assertEqual(x.a, 1)
            x.__class__ = cls
            self.assertTrue(x.__class__ is cls,
                "assigning %r as __class__ for %r silently failed" % (cls, x))
            self.assertEqual(x.a, 1)
        for cls in G, J, K, L, M, N, P, R, list, Int:
            for cls2 in G, J, K, L, M, N, P, R, list, Int:
                if cls is cls2:
                    continue
                cant(cls(), cls2)

        # Issue5283: when __class__ changes in __del__, the wrong
        # type gets DECREF'd.
        # Reassigning __class__ inside __del__ must not corrupt refcounts.
        class O(object):
            pass
        class A(object):
            def __del__(self):
                self.__class__ = O
        l = [A() for x in range(100)]
        del l

    def test_set_dict(self):
        # Testing __dict__ assignment...
        class C(object): pass
        a = C()
        a.__dict__ = {'b': 1}
        self.assertEqual(a.b, 1)

        # Only real dicts may be assigned to an instance __dict__.
        def cant(x, dict):
            try:
                x.__dict__ = dict
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict))
        cant(a, None)
        cant(a, [])
        cant(a, 1)
        del a.__dict__ # Deleting __dict__ is allowed

        class Base(object):
            pass

        def verify_dict_readonly(x):
            """
            x has to be an instance of a class inheriting from Base.
            """
            cant(x, {})
            try:
                del x.__dict__
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__dict__" % x)
            dict_descr = Base.__dict__["__dict__"]
            try:
                dict_descr.__set__(x, {})
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("dict_descr allowed access to %r's dict" % x)

        # Classes don't allow __dict__ assignment and have readonly dicts
        class Meta1(type, Base):
            pass
        class Meta2(Base, type):
            pass
        class D(object):
            __metaclass__ = Meta1
        class E(object):
            __metaclass__ = Meta2
        for cls in C, D, E:
            verify_dict_readonly(cls)
            class_dict = cls.__dict__
            try:
                class_dict["spam"] = "eggs"
            except TypeError:
                pass
            else:
                self.fail("%r's __dict__ can be modified" % cls)

        # Modules also disallow __dict__ assignment
        class Module1(types.ModuleType, Base):
            pass
        class Module2(Base, types.ModuleType):
            pass
        for ModuleType in Module1, Module2:
            mod = ModuleType("spam")
            verify_dict_readonly(mod)
            # ...but writing through the dict object itself is fine.
            mod.__dict__["spam"] = "eggs"

        # Exception's __dict__ can be replaced, but not deleted
        # (at least not any more than regular exception's __dict__ can
        # be deleted; on CPython it is not the case, whereas on PyPy they
        # can, just like any other new-style instance's __dict__.)
def can_delete_dict(e): try: del e.__dict__ except (TypeError, AttributeError): return False else: return True class Exception1(Exception, Base): pass class Exception2(Base, Exception): pass for ExceptionType in Exception, Exception1, Exception2: e = ExceptionType() e.__dict__ = {"a": 1} self.assertEqual(e.a, 1) self.assertEqual(can_delete_dict(e), can_delete_dict(ValueError())) def test_pickles(self): # Testing pickling and copying new-style classes and objects... import pickle, cPickle def sorteditems(d): L = d.items() L.sort() return L global C class C(object): def __init__(self, a, b): super(C, self).__init__() self.a = a self.b = b def __repr__(self): return "C(%r, %r)" % (self.a, self.b) global C1 class C1(list): def __new__(cls, a, b): return super(C1, cls).__new__(cls) def __getnewargs__(self): return (self.a, self.b) def __init__(self, a, b): self.a = a self.b = b def __repr__(self): return "C1(%r, %r)<%r>" % (self.a, self.b, list(self)) global C2 class C2(int): def __new__(cls, a, b, val=0): return super(C2, cls).__new__(cls, val) def __getnewargs__(self): return (self.a, self.b, int(self)) def __init__(self, a, b, val=0): self.a = a self.b = b def __repr__(self): return "C2(%r, %r)<%r>" % (self.a, self.b, int(self)) global C3 class C3(object): def __init__(self, foo): self.foo = foo def __getstate__(self): return self.foo def __setstate__(self, foo): self.foo = foo global C4classic, C4 class C4classic: # classic pass class C4(C4classic, object): # mixed inheritance pass for p in pickle, cPickle: for bin in 0, 1: for cls in C, C1, C2: s = p.dumps(cls, bin) cls2 = p.loads(s) self.assertTrue(cls2 is cls) a = C1(1, 2); a.append(42); a.append(24) b = C2("hello", "world", 42) s = p.dumps((a, b), bin) x, y = p.loads(s) self.assertEqual(x.__class__, a.__class__) self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__)) self.assertEqual(y.__class__, b.__class__) self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__)) self.assertEqual(repr(x), 
repr(a)) self.assertEqual(repr(y), repr(b)) # Test for __getstate__ and __setstate__ on new style class u = C3(42) s = p.dumps(u, bin) v = p.loads(s) self.assertEqual(u.__class__, v.__class__) self.assertEqual(u.foo, v.foo) # Test for picklability of hybrid class u = C4() u.foo = 42 s = p.dumps(u, bin) v = p.loads(s) self.assertEqual(u.__class__, v.__class__) self.assertEqual(u.foo, v.foo) # Testing copy.deepcopy() import copy for cls in C, C1, C2: cls2 = copy.deepcopy(cls) self.assertTrue(cls2 is cls) a = C1(1, 2); a.append(42); a.append(24) b = C2("hello", "world", 42) x, y = copy.deepcopy((a, b)) self.assertEqual(x.__class__, a.__class__) self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__)) self.assertEqual(y.__class__, b.__class__) self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__)) self.assertEqual(repr(x), repr(a)) self.assertEqual(repr(y), repr(b)) def test_pickle_slots(self): # Testing pickling of classes with __slots__ ... import pickle, cPickle # Pickling of classes with __slots__ but without __getstate__ should fail global B, C, D, E class B(object): pass for base in [object, B]: class C(base): __slots__ = ['a'] class D(C): pass try: pickle.dumps(C()) except TypeError: pass else: self.fail("should fail: pickle C instance - %s" % base) try: cPickle.dumps(C()) except TypeError: pass else: self.fail("should fail: cPickle C instance - %s" % base) try: pickle.dumps(C()) except TypeError: pass else: self.fail("should fail: pickle D instance - %s" % base) try: cPickle.dumps(D()) except TypeError: pass else: self.fail("should fail: cPickle D instance - %s" % base) # Give C a nice generic __getstate__ and __setstate__ class C(base): __slots__ = ['a'] def __getstate__(self): try: d = self.__dict__.copy() except AttributeError: d = {} for cls in self.__class__.__mro__: for sn in cls.__dict__.get('__slots__', ()): try: d[sn] = getattr(self, sn) except AttributeError: pass return d def __setstate__(self, d): for k, v in d.items(): 
setattr(self, k, v) class D(C): pass # Now it should work x = C() y = pickle.loads(pickle.dumps(x)) self.assertEqual(hasattr(y, 'a'), 0) y = cPickle.loads(cPickle.dumps(x)) self.assertEqual(hasattr(y, 'a'), 0) x.a = 42 y = pickle.loads(pickle.dumps(x)) self.assertEqual(y.a, 42) y = cPickle.loads(cPickle.dumps(x)) self.assertEqual(y.a, 42) x = D() x.a = 42 x.b = 100 y = pickle.loads(pickle.dumps(x)) self.assertEqual(y.a + y.b, 142) y = cPickle.loads(cPickle.dumps(x)) self.assertEqual(y.a + y.b, 142) # A subclass that adds a slot should also work class E(C): __slots__ = ['b'] x = E() x.a = 42 x.b = "foo" y = pickle.loads(pickle.dumps(x)) self.assertEqual(y.a, x.a) self.assertEqual(y.b, x.b) y = cPickle.loads(cPickle.dumps(x)) self.assertEqual(y.a, x.a) self.assertEqual(y.b, x.b) def test_binary_operator_override(self): # Testing overrides of binary operations... class I(int): def __repr__(self): return "I(%r)" % int(self) def __add__(self, other): return I(int(self) + int(other)) __radd__ = __add__ def __pow__(self, other, mod=None): if mod is None: return I(pow(int(self), int(other))) else: return I(pow(int(self), int(other), int(mod))) def __rpow__(self, other, mod=None): if mod is None: return I(pow(int(other), int(self), mod)) else: return I(pow(int(other), int(self), int(mod))) self.assertEqual(repr(I(1) + I(2)), "I(3)") self.assertEqual(repr(I(1) + 2), "I(3)") self.assertEqual(repr(1 + I(2)), "I(3)") self.assertEqual(repr(I(2) ** I(3)), "I(8)") self.assertEqual(repr(2 ** I(3)), "I(8)") self.assertEqual(repr(I(2) ** 3), "I(8)") self.assertEqual(repr(pow(I(2), I(3), I(5))), "I(3)") class S(str): def __eq__(self, other): return self.lower() == other.lower() __hash__ = None # Silence Py3k warning def test_subclass_propagation(self): # Testing propagation of slot functions to subclasses... 
class A(object): pass class B(A): pass class C(A): pass class D(B, C): pass d = D() orig_hash = hash(d) # related to id(d) in platform-dependent ways A.__hash__ = lambda self: 42 self.assertEqual(hash(d), 42) C.__hash__ = lambda self: 314 self.assertEqual(hash(d), 314) B.__hash__ = lambda self: 144 self.assertEqual(hash(d), 144) D.__hash__ = lambda self: 100 self.assertEqual(hash(d), 100) D.__hash__ = None self.assertRaises(TypeError, hash, d) del D.__hash__ self.assertEqual(hash(d), 144) B.__hash__ = None self.assertRaises(TypeError, hash, d) del B.__hash__ self.assertEqual(hash(d), 314) C.__hash__ = None self.assertRaises(TypeError, hash, d) del C.__hash__ self.assertEqual(hash(d), 42) A.__hash__ = None self.assertRaises(TypeError, hash, d) del A.__hash__ self.assertEqual(hash(d), orig_hash) d.foo = 42 d.bar = 42 self.assertEqual(d.foo, 42) self.assertEqual(d.bar, 42) def __getattribute__(self, name): if name == "foo": return 24 return object.__getattribute__(self, name) A.__getattribute__ = __getattribute__ self.assertEqual(d.foo, 24) self.assertEqual(d.bar, 42) def __getattr__(self, name): if name in ("spam", "foo", "bar"): return "hello" raise AttributeError, name B.__getattr__ = __getattr__ self.assertEqual(d.spam, "hello") self.assertEqual(d.foo, 24) self.assertEqual(d.bar, 42) del A.__getattribute__ self.assertEqual(d.foo, 42) del d.foo self.assertEqual(d.foo, "hello") self.assertEqual(d.bar, 42) del B.__getattr__ try: d.foo except AttributeError: pass else: self.fail("d.foo should be undefined now") # Test a nasty bug in recurse_down_subclasses() class A(object): pass class B(A): pass del B test_support.gc_collect() A.__setitem__ = lambda *a: None # crash def test_buffer_inheritance(self): # Testing that buffer interface is inherited ... import binascii # SF bug [#470040] ParseTuple t# vs subclasses. class MyStr(str): pass base = 'abc' m = MyStr(base) # b2a_hex uses the buffer interface to get its argument's value, via # PyArg_ParseTuple 't#' code. 
self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base)) # It's not clear that unicode will continue to support the character # buffer interface, and this test will fail if that's taken away. class MyUni(unicode): pass base = u'abc' m = MyUni(base) self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base)) class MyInt(int): pass m = MyInt(42) try: binascii.b2a_hex(m) self.fail('subclass of int should not have a buffer interface') except TypeError: pass def test_str_of_str_subclass(self): # Testing __str__ defined in subclass of str ... import binascii import cStringIO class octetstring(str): def __str__(self): return binascii.b2a_hex(self) def __repr__(self): return self + " repr" o = octetstring('A') self.assertEqual(type(o), octetstring) self.assertEqual(type(str(o)), str) self.assertEqual(type(repr(o)), str) self.assertEqual(ord(o), 0x41) self.assertEqual(str(o), '41') self.assertEqual(repr(o), 'A repr') self.assertEqual(o.__str__(), '41') self.assertEqual(o.__repr__(), 'A repr') capture = cStringIO.StringIO() # Calling str() or not exercises different internal paths. print >> capture, o print >> capture, str(o) self.assertEqual(capture.getvalue(), '41\n41\n') capture.close() def test_keyword_arguments(self): # Testing keyword arguments to __init__, __call__... def f(a): return a self.assertEqual(f.__call__(a=42), 42) a = [] list.__init__(a, sequence=[0, 1, 2]) self.assertEqual(a, [0, 1, 2]) def test_recursive_call(self): # Testing recursive __call__() by setting to instance of class... class A(object): pass A.__call__ = A() try: A()() except RuntimeError: pass else: self.fail("Recursion limit should have been reached for __call__()") def test_delete_hook(self): # Testing __del__ hook... 
log = [] class C(object): def __del__(self): log.append(1) c = C() self.assertEqual(log, []) del c test_support.gc_collect() self.assertEqual(log, [1]) class D(object): pass d = D() try: del d[0] except TypeError: pass else: self.fail("invalid del() didn't raise TypeError") def test_hash_inheritance(self): # Testing hash of mutable subclasses... class mydict(dict): pass d = mydict() try: hash(d) except TypeError: pass else: self.fail("hash() of dict subclass should fail") class mylist(list): pass d = mylist() try: hash(d) except TypeError: pass else: self.fail("hash() of list subclass should fail") def test_str_operations(self): try: 'a' + 5 except TypeError: pass else: self.fail("'' + 5 doesn't raise TypeError") try: ''.split('') except ValueError: pass else: self.fail("''.split('') doesn't raise ValueError") try: ''.join([0]) except TypeError: pass else: self.fail("''.join([0]) doesn't raise TypeError") try: ''.rindex('5') except ValueError: pass else: self.fail("''.rindex('5') doesn't raise ValueError") try: '%(n)s' % None except TypeError: pass else: self.fail("'%(n)s' % None doesn't raise TypeError") try: '%(n' % {} except ValueError: pass else: self.fail("'%(n' % {} '' doesn't raise ValueError") try: '%*s' % ('abc') except TypeError: pass else: self.fail("'%*s' % ('abc') doesn't raise TypeError") try: '%*.*s' % ('abc', 5) except TypeError: pass else: self.fail("'%*.*s' % ('abc', 5) doesn't raise TypeError") try: '%s' % (1, 2) except TypeError: pass else: self.fail("'%s' % (1, 2) doesn't raise TypeError") try: '%' % None except ValueError: pass else: self.fail("'%' % None doesn't raise ValueError") self.assertEqual('534253'.isdigit(), 1) self.assertEqual('534253x'.isdigit(), 0) self.assertEqual('%c' % 5, '\x05') self.assertEqual('%c' % '5', '5') def test_deepcopy_recursive(self): # Testing deepcopy of recursive objects... 
class Node: pass a = Node() b = Node() a.b = b b.a = a z = deepcopy(a) # This blew up before def test_unintialized_modules(self): # Testing uninitialized module objects... from types import ModuleType as M m = M.__new__(M) str(m) self.assertEqual(hasattr(m, "__name__"), 0) self.assertEqual(hasattr(m, "__file__"), 0) self.assertEqual(hasattr(m, "foo"), 0) self.assertFalse(m.__dict__) # None or {} are both reasonable answers m.foo = 1 self.assertEqual(m.__dict__, {"foo": 1}) def test_funny_new(self): # Testing __new__ returning something unexpected... class C(object): def __new__(cls, arg): if isinstance(arg, str): return [1, 2, 3] elif isinstance(arg, int): return object.__new__(D) else: return object.__new__(cls) class D(C): def __init__(self, arg): self.foo = arg self.assertEqual(C("1"), [1, 2, 3]) self.assertEqual(D("1"), [1, 2, 3]) d = D(None) self.assertEqual(d.foo, None) d = C(1) self.assertEqual(isinstance(d, D), True) self.assertEqual(d.foo, 1) d = D(1) self.assertEqual(isinstance(d, D), True) self.assertEqual(d.foo, 1) def test_imul_bug(self): # Testing for __imul__ problems... # SF bug 544647 class C(object): def __imul__(self, other): return (self, other) x = C() y = x y *= 1.0 self.assertEqual(y, (x, 1.0)) y = x y *= 2 self.assertEqual(y, (x, 2)) y = x y *= 3L self.assertEqual(y, (x, 3L)) y = x y *= 1L<<100 self.assertEqual(y, (x, 1L<<100)) y = x y *= None self.assertEqual(y, (x, None)) y = x y *= "foo" self.assertEqual(y, (x, "foo")) def test_copy_setstate(self): # Testing that copy.*copy() correctly uses __setstate__... 
import copy class C(object): def __init__(self, foo=None): self.foo = foo self.__foo = foo def setfoo(self, foo=None): self.foo = foo def getfoo(self): return self.__foo def __getstate__(self): return [self.foo] def __setstate__(self_, lst): self.assertEqual(len(lst), 1) self_.__foo = self_.foo = lst[0] a = C(42) a.setfoo(24) self.assertEqual(a.foo, 24) self.assertEqual(a.getfoo(), 42) b = copy.copy(a) self.assertEqual(b.foo, 24) self.assertEqual(b.getfoo(), 24) b = copy.deepcopy(a) self.assertEqual(b.foo, 24) self.assertEqual(b.getfoo(), 24) def test_slices(self): # Testing cases with slices and overridden __getitem__ ... # Strings self.assertEqual("hello"[:4], "hell") self.assertEqual("hello"[slice(4)], "hell") self.assertEqual(str.__getitem__("hello", slice(4)), "hell") class S(str): def __getitem__(self, x): return str.__getitem__(self, x) self.assertEqual(S("hello")[:4], "hell") self.assertEqual(S("hello")[slice(4)], "hell") self.assertEqual(S("hello").__getitem__(slice(4)), "hell") # Tuples self.assertEqual((1,2,3)[:2], (1,2)) self.assertEqual((1,2,3)[slice(2)], (1,2)) self.assertEqual(tuple.__getitem__((1,2,3), slice(2)), (1,2)) class T(tuple): def __getitem__(self, x): return tuple.__getitem__(self, x) self.assertEqual(T((1,2,3))[:2], (1,2)) self.assertEqual(T((1,2,3))[slice(2)], (1,2)) self.assertEqual(T((1,2,3)).__getitem__(slice(2)), (1,2)) # Lists self.assertEqual([1,2,3][:2], [1,2]) self.assertEqual([1,2,3][slice(2)], [1,2]) self.assertEqual(list.__getitem__([1,2,3], slice(2)), [1,2]) class L(list): def __getitem__(self, x): return list.__getitem__(self, x) self.assertEqual(L([1,2,3])[:2], [1,2]) self.assertEqual(L([1,2,3])[slice(2)], [1,2]) self.assertEqual(L([1,2,3]).__getitem__(slice(2)), [1,2]) # Now do lists and __setitem__ a = L([1,2,3]) a[slice(1, 3)] = [3,2] self.assertEqual(a, [1,3,2]) a[slice(0, 2, 1)] = [3,1] self.assertEqual(a, [3,1,2]) a.__setitem__(slice(1, 3), [2,1]) self.assertEqual(a, [3,2,1]) a.__setitem__(slice(0, 2, 1), [2,3]) 
self.assertEqual(a, [2,3,1]) def test_subtype_resurrection(self): # Testing resurrection of new-style instance... class C(object): container = [] def __del__(self): # resurrect the instance C.container.append(self) c = C() c.attr = 42 # The most interesting thing here is whether this blows up, due to # flawed GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1 # bug). del c # If that didn't blow up, it's also interesting to see whether clearing # the last container slot works: that will attempt to delete c again, # which will cause c to get appended back to the container again # "during" the del. (On non-CPython implementations, however, __del__ # is typically not called again.) test_support.gc_collect() self.assertEqual(len(C.container), 1) del C.container[-1] if test_support.check_impl_detail(): test_support.gc_collect() self.assertEqual(len(C.container), 1) self.assertEqual(C.container[-1].attr, 42) # Make c mortal again, so that the test framework with -l doesn't report # it as a leak. del C.__del__ def test_slots_trash(self): # Testing slot trash... # Deallocating deeply nested slotted trash caused stack overflows class trash(object): __slots__ = ['x'] def __init__(self, x): self.x = x o = None for i in xrange(50000): o = trash(o) del o def test_slots_multiple_inheritance(self): # SF bug 575229, multiple inheritance w/ slots dumps core class A(object): __slots__=() class B(object): pass class C(A,B) : __slots__=() if test_support.check_impl_detail(): self.assertEqual(C.__basicsize__, B.__basicsize__) self.assertTrue(hasattr(C, '__dict__')) self.assertTrue(hasattr(C, '__weakref__')) C().x = 2 def test_rmul(self): # Testing correct invocation of __rmul__... 
# SF patch 592646 class C(object): def __mul__(self, other): return "mul" def __rmul__(self, other): return "rmul" a = C() self.assertEqual(a*2, "mul") self.assertEqual(a*2.2, "mul") self.assertEqual(2*a, "rmul") self.assertEqual(2.2*a, "rmul") def test_ipow(self): # Testing correct invocation of __ipow__... # [SF bug 620179] class C(object): def __ipow__(self, other): pass a = C() a **= 2 def test_mutable_bases(self): # Testing mutable bases... # stuff that should work: class C(object): pass class C2(object): def __getattribute__(self, attr): if attr == 'a': return 2 else: return super(C2, self).__getattribute__(attr) def meth(self): return 1 class D(C): pass class E(D): pass d = D() e = E() D.__bases__ = (C,) D.__bases__ = (C2,) self.assertEqual(d.meth(), 1) self.assertEqual(e.meth(), 1) self.assertEqual(d.a, 2) self.assertEqual(e.a, 2) self.assertEqual(C2.__subclasses__(), [D]) try: del D.__bases__ except (TypeError, AttributeError): pass else: self.fail("shouldn't be able to delete .__bases__") try: D.__bases__ = () except TypeError, msg: if str(msg) == "a new-style class can't have only classic bases": self.fail("wrong error message for .__bases__ = ()") else: self.fail("shouldn't be able to set .__bases__ to ()") try: D.__bases__ = (D,) except TypeError: pass else: # actually, we'll have crashed by here... 
self.fail("shouldn't be able to create inheritance cycles") try: D.__bases__ = (C, C) except TypeError: pass else: self.fail("didn't detect repeated base classes") try: D.__bases__ = (E,) except TypeError: pass else: self.fail("shouldn't be able to create inheritance cycles") # let's throw a classic class into the mix: class Classic: def meth2(self): return 3 D.__bases__ = (C, Classic) self.assertEqual(d.meth2(), 3) self.assertEqual(e.meth2(), 3) try: d.a except AttributeError: pass else: self.fail("attribute should have vanished") try: D.__bases__ = (Classic,) except TypeError: pass else: self.fail("new-style class must have a new-style base") def test_builtin_bases(self): # Make sure all the builtin types can have their base queried without # segfaulting. See issue #5787. builtin_types = [tp for tp in __builtin__.__dict__.itervalues() if isinstance(tp, type)] for tp in builtin_types: object.__getattribute__(tp, "__bases__") if tp is not object: self.assertEqual(len(tp.__bases__), 1, tp) class L(list): pass class C(object): pass class D(C): pass try: L.__bases__ = (dict,) except TypeError: pass else: self.fail("shouldn't turn list subclass into dict subclass") try: list.__bases__ = (dict,) except TypeError: pass else: self.fail("shouldn't be able to assign to list.__bases__") try: D.__bases__ = (C, list) except TypeError: pass else: assert 0, "best_base calculation found wanting" def test_mutable_bases_with_failing_mro(self): # Testing mutable bases with failing mro... class WorkOnce(type): def __new__(self, name, bases, ns): self.flag = 0 return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns) def mro(self): if self.flag > 0: raise RuntimeError, "bozo" else: self.flag += 1 return type.mro(self) class WorkAlways(type): def mro(self): # this is here to make sure that .mro()s aren't called # with an exception set (which was possible at one point). # An error message will be printed in a debug build. # What's a good way to test for this? 
return type.mro(self) class C(object): pass class C2(object): pass class D(C): pass class E(D): pass class F(D): __metaclass__ = WorkOnce class G(D): __metaclass__ = WorkAlways # Immediate subclasses have their mro's adjusted in alphabetical # order, so E's will get adjusted before adjusting F's fails. We # check here that E's gets restored. E_mro_before = E.__mro__ D_mro_before = D.__mro__ try: D.__bases__ = (C2,) except RuntimeError: self.assertEqual(E.__mro__, E_mro_before) self.assertEqual(D.__mro__, D_mro_before) else: self.fail("exception not propagated") def test_mutable_bases_catch_mro_conflict(self): # Testing mutable bases catch mro conflict... class A(object): pass class B(object): pass class C(A, B): pass class D(A, B): pass class E(C, D): pass try: C.__bases__ = (B, A) except TypeError: pass else: self.fail("didn't catch MRO conflict") def test_mutable_names(self): # Testing mutable names... class C(object): pass # C.__module__ could be 'test_descr' or '__main__' mod = C.__module__ C.__name__ = 'D' self.assertEqual((C.__module__, C.__name__), (mod, 'D')) C.__name__ = 'D.E' self.assertEqual((C.__module__, C.__name__), (mod, 'D.E')) def test_subclass_right_op(self): # Testing correct dispatch of subclass overloading __r<op>__... # This code tests various cases where right-dispatch of a subclass # should be preferred over left-dispatch of a base class. 
# Case 1: subclass of int; this tests code in abstract.c::binary_op1() class B(int): def __floordiv__(self, other): return "B.__floordiv__" def __rfloordiv__(self, other): return "B.__rfloordiv__" self.assertEqual(B(1) // 1, "B.__floordiv__") self.assertEqual(1 // B(1), "B.__rfloordiv__") # Case 2: subclass of object; this is just the baseline for case 3 class C(object): def __floordiv__(self, other): return "C.__floordiv__" def __rfloordiv__(self, other): return "C.__rfloordiv__" self.assertEqual(C() // 1, "C.__floordiv__") self.assertEqual(1 // C(), "C.__rfloordiv__") # Case 3: subclass of new-style class; here it gets interesting class D(C): def __floordiv__(self, other): return "D.__floordiv__" def __rfloordiv__(self, other): return "D.__rfloordiv__" self.assertEqual(D() // C(), "D.__floordiv__") self.assertEqual(C() // D(), "D.__rfloordiv__") # Case 4: this didn't work right in 2.2.2 and 2.3a1 class E(C): pass self.assertEqual(E.__rfloordiv__, C.__rfloordiv__) self.assertEqual(E() // 1, "C.__floordiv__") self.assertEqual(1 // E(), "C.__rfloordiv__") self.assertEqual(E() // C(), "C.__floordiv__") self.assertEqual(C() // E(), "C.__floordiv__") # This one would fail @test_support.impl_detail("testing an internal kind of method object") def test_meth_class_get(self): # Testing __get__ method of METH_CLASS C methods... 
# Full coverage of descrobject.c::classmethod_get() # Baseline arg = [1, 2, 3] res = {1: None, 2: None, 3: None} self.assertEqual(dict.fromkeys(arg), res) self.assertEqual({}.fromkeys(arg), res) # Now get the descriptor descr = dict.__dict__["fromkeys"] # More baseline using the descriptor directly self.assertEqual(descr.__get__(None, dict)(arg), res) self.assertEqual(descr.__get__({})(arg), res) # Now check various error cases try: descr.__get__(None, None) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(None, None)") try: descr.__get__(42) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(42)") try: descr.__get__(None, 42) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(None, 42)") try: descr.__get__(None, int) except TypeError: pass else: self.fail("shouldn't have allowed descr.__get__(None, int)") def test_isinst_isclass(self): # Testing proxy isinstance() and isclass()... class Proxy(object): def __init__(self, obj): self.__obj = obj def __getattribute__(self, name): if name.startswith("_Proxy__"): return object.__getattribute__(self, name) else: return getattr(self.__obj, name) # Test with a classic class class C: pass a = C() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test # Test with a classic subclass class D(C): pass a = D() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test # Test with a new-style class class C(object): pass a = C() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test # Test with a new-style subclass class D(C): pass a = D() pa = Proxy(a) self.assertIsInstance(a, C) # Baseline self.assertIsInstance(pa, C) # Test def test_proxy_super(self): # Testing super() for a proxy object... 
class Proxy(object): def __init__(self, obj): self.__obj = obj def __getattribute__(self, name): if name.startswith("_Proxy__"): return object.__getattribute__(self, name) else: return getattr(self.__obj, name) class B(object): def f(self): return "B.f" class C(B): def f(self): return super(C, self).f() + "->C.f" obj = C() p = Proxy(obj) self.assertEqual(C.__dict__["f"](p), "B.f->C.f") def test_carloverre(self): # Testing prohibition of Carlo Verre's hack... try: object.__setattr__(str, "foo", 42) except TypeError: pass else: self.fail("Carlo Verre __setattr__ succeeded!") try: object.__delattr__(str, "lower") except TypeError: pass else: self.fail("Carlo Verre __delattr__ succeeded!") def test_weakref_segfault(self): # Testing weakref segfault... # SF 742911 import weakref class Provoker: def __init__(self, referrent): self.ref = weakref.ref(referrent) def __del__(self): x = self.ref() class Oops(object): pass o = Oops() o.whatever = Provoker(o) del o def test_wrapper_segfault(self): # SF 927248: deeply nested wrappers could cause stack overflow f = lambda:None for i in xrange(1000000): f = f.__call__ f = None def test_file_fault(self): # Testing sys.stdout is changed in getattr... test_stdout = sys.stdout class StdoutGuard: def __getattr__(self, attr): sys.stdout = sys.__stdout__ raise RuntimeError("Premature access to sys.stdout.%s" % attr) sys.stdout = StdoutGuard() try: print "Oops!" except RuntimeError: pass finally: sys.stdout = test_stdout def test_vicious_descriptor_nonsense(self): # Testing vicious_descriptor_nonsense... # A potential segfault spotted by Thomas Wouters in mail to # python-dev 2003-04-17, turned into an example & fixed by Michael # Hudson just less than four months later... 
class Evil(object): def __hash__(self): return hash('attr') def __eq__(self, other): del C.attr return 0 class Descr(object): def __get__(self, ob, type=None): return 1 class C(object): attr = Descr() c = C() c.__dict__[Evil()] = 0 self.assertEqual(c.attr, 1) # this makes a crash more likely: test_support.gc_collect() self.assertEqual(hasattr(c, 'attr'), False) def test_init(self): # SF 1155938 class Foo(object): def __init__(self): return 10 try: Foo() except TypeError: pass else: self.fail("did not test __init__() for None return") def test_method_wrapper(self): # Testing method-wrapper objects... # <type 'method-wrapper'> did not support any reflection before 2.5 l = [] self.assertEqual(l.__add__, l.__add__) self.assertEqual(l.__add__, [].__add__) self.assertTrue(l.__add__ != [5].__add__) self.assertTrue(l.__add__ != l.__mul__) self.assertTrue(l.__add__.__name__ == '__add__') if hasattr(l.__add__, '__self__'): # CPython self.assertTrue(l.__add__.__self__ is l) self.assertTrue(l.__add__.__objclass__ is list) else: # Python implementations where [].__add__ is a normal bound method self.assertTrue(l.__add__.im_self is l) self.assertTrue(l.__add__.im_class is list) self.assertEqual(l.__add__.__doc__, list.__add__.__doc__) try: hash(l.__add__) except TypeError: pass else: self.fail("no TypeError from hash([].__add__)") t = () t += (7,) self.assertEqual(t.__add__, (7,).__add__) self.assertEqual(hash(t.__add__), hash((7,).__add__)) def test_not_implemented(self): # Testing NotImplemented... 
# all binary methods should be able to return a NotImplemented import operator def specialmethod(self, other): return NotImplemented def check(expr, x, y): try: exec expr in {'x': x, 'y': y, 'operator': operator} except TypeError: pass else: self.fail("no TypeError from %r" % (expr,)) N1 = sys.maxint + 1L # might trigger OverflowErrors instead of # TypeErrors N2 = sys.maxint # if sizeof(int) < sizeof(long), might trigger # ValueErrors instead of TypeErrors for metaclass in [type, types.ClassType]: for name, expr, iexpr in [ ('__add__', 'x + y', 'x += y'), ('__sub__', 'x - y', 'x -= y'), ('__mul__', 'x * y', 'x *= y'), ('__truediv__', 'operator.truediv(x, y)', None), ('__floordiv__', 'operator.floordiv(x, y)', None), ('__div__', 'x / y', 'x /= y'), ('__mod__', 'x % y', 'x %= y'), ('__divmod__', 'divmod(x, y)', None), ('__pow__', 'x ** y', 'x **= y'), ('__lshift__', 'x << y', 'x <<= y'), ('__rshift__', 'x >> y', 'x >>= y'), ('__and__', 'x & y', 'x &= y'), ('__or__', 'x | y', 'x |= y'), ('__xor__', 'x ^ y', 'x ^= y'), ('__coerce__', 'coerce(x, y)', None)]: if name == '__coerce__': rname = name else: rname = '__r' + name[2:] A = metaclass('A', (), {name: specialmethod}) B = metaclass('B', (), {rname: specialmethod}) a = A() b = B() check(expr, a, a) check(expr, a, b) check(expr, b, a) check(expr, b, b) check(expr, a, N1) check(expr, a, N2) check(expr, N1, b) check(expr, N2, b) if iexpr: check(iexpr, a, a) check(iexpr, a, b) check(iexpr, b, a) check(iexpr, b, b) check(iexpr, a, N1) check(iexpr, a, N2) iname = '__i' + name[2:] C = metaclass('C', (), {iname: specialmethod}) c = C() check(iexpr, c, a) check(iexpr, c, b) check(iexpr, c, N1) check(iexpr, c, N2) def test_assign_slice(self): # ceval.c's assign_slice used to check for # tp->tp_as_sequence->sq_slice instead of # tp->tp_as_sequence->sq_ass_slice class C(object): def __setslice__(self, start, stop, value): self.value = value c = C() c[1:2] = 3 self.assertEqual(c.value, 3) def test_set_and_no_get(self): # See # 
http://mail.python.org/pipermail/python-dev/2010-January/095637.html class Descr(object): def __init__(self, name): self.name = name def __set__(self, obj, value): obj.__dict__[self.name] = value descr = Descr("a") class X(object): a = descr x = X() self.assertIs(x.a, descr) x.a = 42 self.assertEqual(x.a, 42) # Also check type_getattro for correctness. class Meta(type): pass class X(object): __metaclass__ = Meta X.a = 42 Meta.a = Descr("a") self.assertEqual(X.a, 42) def test_getattr_hooks(self): # issue 4230 class Descriptor(object): counter = 0 def __get__(self, obj, objtype=None): def getter(name): self.counter += 1 raise AttributeError(name) return getter descr = Descriptor() class A(object): __getattribute__ = descr class B(object): __getattr__ = descr class C(object): __getattribute__ = descr __getattr__ = descr self.assertRaises(AttributeError, getattr, A(), "attr") self.assertEqual(descr.counter, 1) self.assertRaises(AttributeError, getattr, B(), "attr") self.assertEqual(descr.counter, 2) self.assertRaises(AttributeError, getattr, C(), "attr") self.assertEqual(descr.counter, 4) import gc class EvilGetattribute(object): # This used to segfault def __getattr__(self, name): raise AttributeError(name) def __getattribute__(self, name): del EvilGetattribute.__getattr__ for i in range(5): gc.collect() raise AttributeError(name) self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr") def test_abstractmethods(self): # type pretends not to have __abstractmethods__. 
self.assertRaises(AttributeError, getattr, type, "__abstractmethods__") class meta(type): pass self.assertRaises(AttributeError, getattr, meta, "__abstractmethods__") class X(object): pass with self.assertRaises(AttributeError): del X.__abstractmethods__ def test_proxy_call(self): class FakeStr(object): __class__ = str fake_str = FakeStr() # isinstance() reads __class__ on new style classes self.assertTrue(isinstance(fake_str, str)) # call a method descriptor with self.assertRaises(TypeError): str.split(fake_str) # call a slot wrapper descriptor with self.assertRaises(TypeError): str.__add__(fake_str, "abc") class DictProxyTests(unittest.TestCase): def setUp(self): class C(object): def meth(self): pass self.C = C def test_iter_keys(self): # Testing dict-proxy iterkeys... keys = [ key for key in self.C.__dict__.iterkeys() ] keys.sort() self.assertEqual(keys, ['__dict__', '__doc__', '__module__', '__weakref__', 'meth']) def test_iter_values(self): # Testing dict-proxy itervalues... values = [ values for values in self.C.__dict__.itervalues() ] self.assertEqual(len(values), 5) def test_iter_items(self): # Testing dict-proxy iteritems... keys = [ key for (key, value) in self.C.__dict__.iteritems() ] keys.sort() self.assertEqual(keys, ['__dict__', '__doc__', '__module__', '__weakref__', 'meth']) def test_dict_type_with_metaclass(self): # Testing type of __dict__ when __metaclass__ set... class B(object): pass class M(type): pass class C: # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy __metaclass__ = M self.assertEqual(type(C.__dict__), type(B.__dict__)) class PTypesLongInitTest(unittest.TestCase): # This is in its own TestCase so that it can be run before any other tests. def test_pytype_long_ready(self): # Testing SF bug 551412 ... # This dumps core when SF bug 551412 isn't fixed -- # but only when test_descr.py is run separately. # (That can't be helped -- as soon as PyType_Ready() # is called for PyLong_Type, the bug is gone.) 
class UserLong(object): def __pow__(self, *args): pass try: pow(0L, UserLong(), 0L) except: pass # Another segfault only when run early # (before PyType_Ready(tuple) is called) type.mro(tuple) def test_main(): deprecations = [(r'complex divmod\(\), // and % are deprecated$', DeprecationWarning)] if sys.py3kwarning: deprecations += [ ("classic (int|long) division", DeprecationWarning), ("coerce.. not supported", DeprecationWarning), (".+__(get|set|del)slice__ has been removed", DeprecationWarning)] with test_support.check_warnings(*deprecations): # Run all local test cases, with PTypesLongInitTest first. test_support.run_unittest(PTypesLongInitTest, OperatorsTest, ClassPropertiesAndMethods, DictProxyTests) if __name__ == "__main__": test_main()
mit
cloakedcode/CouchPotatoServer
libs/sqlalchemy/engine/base.py
12
120156
# engine/base.py # Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Basic components for SQL execution and interfacing with DB-API. Defines the basic components used to interface DB-API modules with higher-level statement-construction, connection-management, execution and result contexts. """ __all__ = [ 'BufferedColumnResultProxy', 'BufferedColumnRow', 'BufferedRowResultProxy','Compiled', 'Connectable', 'Connection', 'Dialect', 'Engine','ExecutionContext', 'NestedTransaction', 'ResultProxy', 'RootTransaction','RowProxy', 'SchemaIterator', 'StringIO', 'Transaction', 'TwoPhaseTransaction', 'connection_memoize'] import inspect, StringIO, sys, operator from itertools import izip from sqlalchemy import exc, schema, util, types, log, interfaces, \ event, events from sqlalchemy.sql import expression, util as sql_util from sqlalchemy import processors import collections class Dialect(object): """Define the behavior of a specific database and DB-API combination. Any aspect of metadata definition, SQL query generation, execution, result-set handling, or anything else which varies between databases is defined under the general category of the Dialect. The Dialect acts as a factory for other database-specific object implementations including ExecutionContext, Compiled, DefaultGenerator, and TypeEngine. All Dialects implement the following attributes: name identifying name for the dialect from a DBAPI-neutral point of view (i.e. 'sqlite') driver identifying name for the dialect's DBAPI positional True if the paramstyle for this Dialect is positional. paramstyle the paramstyle to be used (some DB-APIs support multiple paramstyles). convert_unicode True if Unicode conversion should be applied to all ``str`` types. encoding type of encoding to use for unicode, usually defaults to 'utf-8'. 
statement_compiler a :class:`~Compiled` class used to compile SQL statements ddl_compiler a :class:`~Compiled` class used to compile DDL statements server_version_info a tuple containing a version number for the DB backend in use. This value is only available for supporting dialects, and is typically populated during the initial connection to the database. default_schema_name the name of the default schema. This value is only available for supporting dialects, and is typically populated during the initial connection to the database. execution_ctx_cls a :class:`.ExecutionContext` class used to handle statement execution execute_sequence_format either the 'tuple' or 'list' type, depending on what cursor.execute() accepts for the second argument (they vary). preparer a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to quote identifiers. supports_alter ``True`` if the database supports ``ALTER TABLE``. max_identifier_length The maximum length of identifier names. supports_unicode_statements Indicate whether the DB-API can receive SQL statements as Python unicode strings supports_unicode_binds Indicate whether the DB-API can receive string bind parameters as Python unicode strings supports_sane_rowcount Indicate whether the dialect properly implements rowcount for ``UPDATE`` and ``DELETE`` statements. supports_sane_multi_rowcount Indicate whether the dialect properly implements rowcount for ``UPDATE`` and ``DELETE`` statements when executed via executemany. preexecute_autoincrement_sequences True if 'implicit' primary key functions must be executed separately in order to get their value. This is currently oriented towards Postgresql. implicit_returning use RETURNING or equivalent during INSERT execution in order to load newly generated primary keys and other column defaults in one execution, which are then available via inserted_primary_key. 
If an insert statement has returning() specified explicitly, the "implicit" functionality is not used and inserted_primary_key will not be available. dbapi_type_map A mapping of DB-API type objects present in this Dialect's DB-API implementation mapped to TypeEngine implementations used by the dialect. This is used to apply types to result sets based on the DB-API types present in cursor.description; it only takes effect for result sets against textual statements where no explicit typemap was present. colspecs A dictionary of TypeEngine classes from sqlalchemy.types mapped to subclasses that are specific to the dialect class. This dictionary is class-level only and is not accessed from the dialect instance itself. supports_default_values Indicates if the construct ``INSERT INTO tablename DEFAULT VALUES`` is supported supports_sequences Indicates if the dialect supports CREATE SEQUENCE or similar. sequences_optional If True, indicates if the "optional" flag on the Sequence() construct should signal to not generate a CREATE SEQUENCE. Applies only to dialects that support sequences. Currently used only to allow Postgresql SERIAL to be used on a column that specifies Sequence() for usage on other backends. supports_native_enum Indicates if the dialect supports a native ENUM construct. This will prevent types.Enum from generating a CHECK constraint when that type is used. supports_native_boolean Indicates if the dialect supports a native boolean construct. This will prevent types.Boolean from generating a CHECK constraint when that type is used. """ def create_connect_args(self, url): """Build DB-API compatible connection arguments. Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple consisting of a `*args`/`**kwargs` suitable to send directly to the dbapi's connect function. """ raise NotImplementedError() @classmethod def type_descriptor(cls, typeobj): """Transform a generic type to a dialect-specific type. 
Dialect classes will usually use the :func:`~sqlalchemy.types.adapt_type` function in the types module to make this job easy. The returned result is cached *per dialect class* so can contain no dialect-instance state. """ raise NotImplementedError() def initialize(self, connection): """Called during strategized creation of the dialect with a connection. Allows dialects to configure options based on server version info or other properties. The connection passed here is a SQLAlchemy Connection object, with full capabilities. The initalize() method of the base dialect should be called via super(). """ pass def reflecttable(self, connection, table, include_columns=None): """Load table description from the database. Given a :class:`.Connection` and a :class:`~sqlalchemy.schema.Table` object, reflect its columns and properties from the database. If include_columns (a list or set) is specified, limit the autoload to the given column names. The default implementation uses the :class:`~sqlalchemy.engine.reflection.Inspector` interface to provide the output, building upon the granular table/column/ constraint etc. methods of :class:`.Dialect`. """ raise NotImplementedError() def get_columns(self, connection, table_name, schema=None, **kw): """Return information about columns in `table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return column information as a list of dictionaries with these keys: name the column's name type [sqlalchemy.types#TypeEngine] nullable boolean default the column's default value autoincrement boolean sequence a dictionary of the form {'name' : str, 'start' :int, 'increment': int} Additional column attributes may be present. """ raise NotImplementedError() def get_primary_keys(self, connection, table_name, schema=None, **kw): """Return information about primary keys in `table_name`. 
Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return primary key information as a list of column names. """ raise NotImplementedError() def get_pk_constraint(self, table_name, schema=None, **kw): """Return information about the primary key constraint on table_name`. Given a string `table_name`, and an optional string `schema`, return primary key information as a dictionary with these keys: constrained_columns a list of column names that make up the primary key name optional name of the primary key constraint. """ raise NotImplementedError() def get_foreign_keys(self, connection, table_name, schema=None, **kw): """Return information about foreign_keys in `table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return foreign key information as a list of dicts with these keys: name the constraint's name constrained_columns a list of column names that make up the foreign key referred_schema the name of the referred schema referred_table the name of the referred table referred_columns a list of column names in the referred table that correspond to constrained_columns """ raise NotImplementedError() def get_table_names(self, connection, schema=None, **kw): """Return a list of table names for `schema`.""" raise NotImplementedError def get_view_names(self, connection, schema=None, **kw): """Return a list of all view names available in the database. schema: Optional, retrieve names from a non-default schema. """ raise NotImplementedError() def get_view_definition(self, connection, view_name, schema=None, **kw): """Return view definition. Given a :class:`.Connection`, a string `view_name`, and an optional string `schema`, return the view definition. """ raise NotImplementedError() def get_indexes(self, connection, table_name, schema=None, **kw): """Return information about indexes in `table_name`. 
Given a :class:`.Connection`, a string `table_name` and an optional string `schema`, return index information as a list of dictionaries with these keys: name the index's name column_names list of column names in order unique boolean """ raise NotImplementedError() def normalize_name(self, name): """convert the given name to lowercase if it is detected as case insensitive. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def denormalize_name(self, name): """convert the given name to a case insensitive identifier for the backend if it is an all-lowercase name. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def has_table(self, connection, table_name, schema=None): """Check the existence of a particular table in the database. Given a :class:`.Connection` object and a string `table_name`, return True if the given table (possibly within the specified `schema`) exists in the database, False otherwise. """ raise NotImplementedError() def has_sequence(self, connection, sequence_name, schema=None): """Check the existence of a particular sequence in the database. Given a :class:`.Connection` object and a string `sequence_name`, return True if the given sequence exists in the database, False otherwise. """ raise NotImplementedError() def _get_server_version_info(self, connection): """Retrieve the server version info from the given connection. This is used by the default implementation to populate the "server_version_info" attribute and is called exactly once upon first connect. """ raise NotImplementedError() def _get_default_schema_name(self, connection): """Return the string name of the currently selected schema from the given connection. This is used by the default implementation to populate the "default_schema_name" attribute and is called exactly once upon first connect. 
""" raise NotImplementedError() def do_begin(self, connection): """Provide an implementation of *connection.begin()*, given a DB-API connection.""" raise NotImplementedError() def do_rollback(self, connection): """Provide an implementation of *connection.rollback()*, given a DB-API connection.""" raise NotImplementedError() def create_xid(self): """Create a two-phase transaction ID. This id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). Its format is unspecified. """ raise NotImplementedError() def do_commit(self, connection): """Provide an implementation of *connection.commit()*, given a DB-API connection.""" raise NotImplementedError() def do_savepoint(self, connection, name): """Create a savepoint with the given name on a SQLAlchemy connection.""" raise NotImplementedError() def do_rollback_to_savepoint(self, connection, name): """Rollback a SQL Alchemy connection to the named savepoint.""" raise NotImplementedError() def do_release_savepoint(self, connection, name): """Release the named savepoint on a SQL Alchemy connection.""" raise NotImplementedError() def do_begin_twophase(self, connection, xid): """Begin a two phase transaction on the given connection.""" raise NotImplementedError() def do_prepare_twophase(self, connection, xid): """Prepare a two phase transaction on the given connection.""" raise NotImplementedError() def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False): """Rollback a two phase transaction on the given connection.""" raise NotImplementedError() def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False): """Commit a two phase transaction on the given connection.""" raise NotImplementedError() def do_recover_twophase(self, connection): """Recover list of uncommited prepared two phase transaction identifiers on the given connection.""" raise NotImplementedError() def do_executemany(self, cursor, statement, parameters, context=None): """Provide an 
implementation of ``cursor.executemany(statement, parameters)``.""" raise NotImplementedError() def do_execute(self, cursor, statement, parameters, context=None): """Provide an implementation of ``cursor.execute(statement, parameters)``.""" raise NotImplementedError() def do_execute_no_params(self, cursor, statement, parameters, context=None): """Provide an implementation of ``cursor.execute(statement)``. The parameter collection should not be sent. """ raise NotImplementedError() def is_disconnect(self, e, connection, cursor): """Return True if the given DB-API error indicates an invalid connection""" raise NotImplementedError() def connect(self): """return a callable which sets up a newly created DBAPI connection. The callable accepts a single argument "conn" which is the DBAPI connection itself. It has no return value. This is used to set dialect-wide per-connection options such as isolation modes, unicode modes, etc. If a callable is returned, it will be assembled into a pool listener that receives the direct DBAPI connection, with all wrappers removed. If None is returned, no listener will be generated. """ return None def reset_isolation_level(self, dbapi_conn): """Given a DBAPI connection, revert its isolation to the default.""" raise NotImplementedError() def set_isolation_level(self, dbapi_conn, level): """Given a DBAPI connection, set its isolation level.""" raise NotImplementedError() def get_isolation_level(self, dbapi_conn): """Given a DBAPI connection, return its isolation level.""" raise NotImplementedError() class ExecutionContext(object): """A messenger object for a Dialect that corresponds to a single execution. ExecutionContext should have these data members: connection Connection object which can be freely used by default value generators to execute SQL. This Connection should reference the same underlying connection/transactional resources of root_connection. root_connection Connection object which is the source of this ExecutionContext. 
This Connection may have close_with_result=True set, in which case it can only be used once. dialect dialect which created this ExecutionContext. cursor DB-API cursor procured from the connection, compiled if passed to constructor, sqlalchemy.engine.base.Compiled object being executed, statement string version of the statement to be executed. Is either passed to the constructor, or must be created from the sql.Compiled object by the time pre_exec() has completed. parameters bind parameters passed to the execute() method. For compiled statements, this is a dictionary or list of dictionaries. For textual statements, it should be in a format suitable for the dialect's paramstyle (i.e. dict or list of dicts for non positional, list or list of lists/tuples for positional). isinsert True if the statement is an INSERT. isupdate True if the statement is an UPDATE. should_autocommit True if the statement is a "committable" statement. postfetch_cols a list of Column objects for which a server-side default or inline SQL expression value was fired off. Applies to inserts and updates. """ def create_cursor(self): """Return a new cursor generated from this ExecutionContext's connection. Some dialects may wish to change the behavior of connection.cursor(), such as postgresql which may return a PG "server side" cursor. """ raise NotImplementedError() def pre_exec(self): """Called before an execution of a compiled statement. If a compiled statement was passed to this ExecutionContext, the `statement` and `parameters` datamembers must be initialized after this statement is complete. """ raise NotImplementedError() def post_exec(self): """Called after the execution of a compiled statement. If a compiled statement was passed to this ExecutionContext, the `last_insert_ids`, `last_inserted_params`, etc. datamembers should be available after this method completes. """ raise NotImplementedError() def result(self): """Return a result object corresponding to this ExecutionContext. 
Returns a ResultProxy. """ raise NotImplementedError() def handle_dbapi_exception(self, e): """Receive a DBAPI exception which occurred upon execute, result fetch, etc.""" raise NotImplementedError() def should_autocommit_text(self, statement): """Parse the given textual statement and return True if it refers to a "committable" statement""" raise NotImplementedError() def lastrow_has_defaults(self): """Return True if the last INSERT or UPDATE row contained inlined or database-side defaults. """ raise NotImplementedError() def get_rowcount(self): """Return the number of rows produced (by a SELECT query) or affected (by an INSERT/UPDATE/DELETE statement). Note that this row count may not be properly implemented in some dialects; this is indicated by the ``supports_sane_rowcount`` and ``supports_sane_multi_rowcount`` dialect attributes. """ raise NotImplementedError() class Compiled(object): """Represent a compiled SQL or DDL expression. The ``__str__`` method of the ``Compiled`` object should produce the actual text of the statement. ``Compiled`` objects are specific to their underlying database dialect, and also may or may not be specific to the columns referenced within a particular set of bind parameters. In no case should the ``Compiled`` object be dependent on the actual values of those bind parameters, even though it may reference those values as defaults. """ def __init__(self, dialect, statement, bind=None): """Construct a new ``Compiled`` object. :param dialect: ``Dialect`` to compile against. :param statement: ``ClauseElement`` to be compiled. :param bind: Optional Engine or Connection to compile this statement against. 
""" self.dialect = dialect self.bind = bind if statement is not None: self.statement = statement self.can_execute = statement.supports_execution self.string = self.process(self.statement) @util.deprecated("0.7", ":class:`.Compiled` objects now compile " "within the constructor.") def compile(self): """Produce the internal string representation of this element.""" pass @property def sql_compiler(self): """Return a Compiled that is capable of processing SQL expressions. If this compiler is one, it would likely just return 'self'. """ raise NotImplementedError() def process(self, obj, **kwargs): return obj._compiler_dispatch(self, **kwargs) def __str__(self): """Return the string text of the generated SQL or DDL.""" return self.string or '' def construct_params(self, params=None): """Return the bind params for this compiled object. :param params: a dict of string/object pairs whos values will override bind values compiled in to the statement. """ raise NotImplementedError() @property def params(self): """Return the bind params for this compiled object.""" return self.construct_params() def execute(self, *multiparams, **params): """Execute this compiled object.""" e = self.bind if e is None: raise exc.UnboundExecutionError( "This Compiled object is not bound to any Engine " "or Connection.") return e._execute_compiled(self, multiparams, params) def scalar(self, *multiparams, **params): """Execute this compiled object and return the result's scalar value.""" return self.execute(*multiparams, **params).scalar() class TypeCompiler(object): """Produces DDL specification for TypeEngine objects.""" def __init__(self, dialect): self.dialect = dialect def process(self, type_): return type_._compiler_dispatch(self) class Connectable(object): """Interface for an object which supports execution of SQL constructs. The two implementations of :class:`.Connectable` are :class:`.Connection` and :class:`.Engine`. 
Connectable must also implement the 'dialect' member which references a :class:`.Dialect` instance. """ def connect(self, **kwargs): """Return a :class:`.Connection` object. Depending on context, this may be ``self`` if this object is already an instance of :class:`.Connection`, or a newly procured :class:`.Connection` if this object is an instance of :class:`.Engine`. """ def contextual_connect(self): """Return a :class:`.Connection` object which may be part of an ongoing context. Depending on context, this may be ``self`` if this object is already an instance of :class:`.Connection`, or a newly procured :class:`.Connection` if this object is an instance of :class:`.Engine`. """ raise NotImplementedError() @util.deprecated("0.7", "Use the create() method on the given schema " "object directly, i.e. :meth:`.Table.create`, " ":meth:`.Index.create`, :meth:`.MetaData.create_all`") def create(self, entity, **kwargs): """Emit CREATE statements for the given schema entity.""" raise NotImplementedError() @util.deprecated("0.7", "Use the drop() method on the given schema " "object directly, i.e. :meth:`.Table.drop`, " ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") def drop(self, entity, **kwargs): """Emit DROP statements for the given schema entity.""" raise NotImplementedError() def execute(self, object, *multiparams, **params): """Executes the given construct and returns a :class:`.ResultProxy`.""" raise NotImplementedError() def scalar(self, object, *multiparams, **params): """Executes and returns the first column of the first row. The underlying cursor is closed after execution. """ raise NotImplementedError() def _run_visitor(self, visitorcallable, element, **kwargs): raise NotImplementedError() def _execute_clauseelement(self, elem, multiparams=None, params=None): raise NotImplementedError() class Connection(Connectable): """Provides high-level functionality for a wrapped DB-API connection. 
Provides execution support for string-based SQL statements as well as :class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator` objects. Provides a :meth:`begin` method to return :class:`.Transaction` objects. The Connection object is **not** thread-safe. While a Connection can be shared among threads using properly synchronized access, it is still possible that the underlying DBAPI connection may not support shared access between threads. Check the DBAPI documentation for details. The Connection object represents a single dbapi connection checked out from the connection pool. In this state, the connection pool has no affect upon the connection, including its expiration or timeout state. For the connection pool to properly manage connections, connections should be returned to the connection pool (i.e. ``connection.close()``) whenever the connection is not in use. .. index:: single: thread safety; Connection """ def __init__(self, engine, connection=None, close_with_result=False, _branch=False, _execution_options=None): """Construct a new Connection. The constructor here is not public and is only called only by an :class:`.Engine`. See :meth:`.Engine.connect` and :meth:`.Engine.contextual_connect` methods. """ self.engine = engine self.dialect = engine.dialect self.__connection = connection or engine.raw_connection() self.__transaction = None self.should_close_with_result = close_with_result self.__savepoint_seq = 0 self.__branch = _branch self.__invalid = False self._has_events = engine._has_events self._echo = self.engine._should_log_info() if _execution_options: self._execution_options =\ engine._execution_options.union(_execution_options) else: self._execution_options = engine._execution_options def _branch(self): """Return a new Connection which references this Connection's engine and connection; but does not have close_with_result enabled, and also whose close() method does nothing. 
This is used to execute "sub" statements within a single execution, usually an INSERT statement. """ return self.engine._connection_cls( self.engine, self.__connection, _branch=True) def _clone(self): """Create a shallow copy of this Connection. """ c = self.__class__.__new__(self.__class__) c.__dict__ = self.__dict__.copy() return c def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def execution_options(self, **opt): """ Set non-SQL options for the connection which take effect during execution. The method returns a copy of this :class:`.Connection` which references the same underlying DBAPI connection, but also defines the given execution options which will take effect for a call to :meth:`execute`. As the new :class:`.Connection` references the same underlying resource, it is probably best to ensure that the copies would be discarded immediately, which is implicit if used as in:: result = connection.execution_options(stream_results=True).\\ execute(stmt) :meth:`.Connection.execution_options` accepts all options as those accepted by :meth:`.Executable.execution_options`. Additionally, it includes options that are applicable only to :class:`.Connection`. :param autocommit: Available on: Connection, statement. When True, a COMMIT will be invoked after execution when executed in 'autocommit' mode, i.e. when an explicit transaction is not begun on the connection. Note that DBAPI connections by default are always in a transaction - SQLAlchemy uses rules applied to different kinds of statements to determine if COMMIT will be invoked in order to provide its "autocommit" feature. Typically, all INSERT/UPDATE/DELETE statements as well as CREATE/DROP statements have autocommit behavior enabled; SELECT constructs do not. Use this option when invoking a SELECT or other specific SQL construct where COMMIT is desired (typically when calling stored procedures and such), and an explicit transaction is not in progress. 
:param compiled_cache: Available on: Connection. A dictionary where :class:`.Compiled` objects will be cached when the :class:`.Connection` compiles a clause expression into a :class:`.Compiled` object. It is the user's responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the "batch" mode for an INSERT or UPDATE statement. The format of this dictionary is not guaranteed to stay the same in future releases. Note that the ORM makes use of its own "compiled" caches for some operations, including flush operations. The caching used by the ORM internally supersedes a cache dictionary specified here. :param isolation_level: Available on: Connection. Set the transaction isolation level for the lifespan of this connection. Valid values include those string values accepted by the ``isolation_level`` parameter passed to :func:`.create_engine`, and are database specific, including those for :ref:`sqlite_toplevel`, :ref:`postgresql_toplevel` - see those dialect's documentation for further info. Note that this option necessarily affects the underying DBAPI connection for the lifespan of the originating :class:`.Connection`, and is not per-execution. This setting is not removed until the underying DBAPI connection is returned to the connection pool, i.e. the :meth:`.Connection.close` method is called. :param no_parameters: When ``True``, if the final parameter list or dictionary is totally empty, will invoke the statement on the cursor as ``cursor.execute(statement)``, not passing the parameter collection at all. 
Some DBAPIs such as psycopg2 and mysql-python consider percent signs as significant only when parameters are present; this option allows code to generate SQL containing percent signs (and possibly other characters) that is neutral regarding whether it's executed by the DBAPI or piped into a script that's later invoked by command line tools. New in 0.7.6. :param stream_results: Available on: Connection, statement. Indicate to the dialect that results should be "streamed" and not pre-buffered, if possible. This is a limitation of many DBAPIs. The flag is currently understood only by the psycopg2 dialect. """ c = self._clone() c._execution_options = c._execution_options.union(opt) if 'isolation_level' in opt: c._set_isolation_level() return c def _set_isolation_level(self): self.dialect.set_isolation_level(self.connection, self._execution_options['isolation_level']) self.connection._connection_record.finalize_callback = \ self.dialect.reset_isolation_level @property def closed(self): """Return True if this connection is closed.""" return not self.__invalid and '_Connection__connection' \ not in self.__dict__ @property def invalidated(self): """Return True if this connection was invalidated.""" return self.__invalid @property def connection(self): "The underlying DB-API connection managed by this Connection." 
try: return self.__connection except AttributeError: return self._revalidate_connection() def _revalidate_connection(self): if self.__invalid: if self.__transaction is not None: raise exc.InvalidRequestError( "Can't reconnect until invalid " "transaction is rolled back") self.__connection = self.engine.raw_connection() self.__invalid = False return self.__connection raise exc.ResourceClosedError("This Connection is closed") @property def _connection_is_valid(self): # use getattr() for is_valid to support exceptions raised in # dialect initializer, where the connection is not wrapped in # _ConnectionFairy return getattr(self.__connection, 'is_valid', False) @property def _still_open_and_connection_is_valid(self): return \ not self.closed and \ not self.invalidated and \ getattr(self.__connection, 'is_valid', False) @property def info(self): """A collection of per-DB-API connection instance properties.""" return self.connection.info def connect(self): """Returns self. This ``Connectable`` interface method returns self, allowing Connections to be used interchangably with Engines in most situations that require a bind. """ return self def contextual_connect(self, **kwargs): """Returns self. This ``Connectable`` interface method returns self, allowing Connections to be used interchangably with Engines in most situations that require a bind. """ return self def invalidate(self, exception=None): """Invalidate the underlying DBAPI connection associated with this Connection. The underlying DB-API connection is literally closed (if possible), and is discarded. Its source connection pool will typically lazily create a new connection to replace it. Upon the next usage, this Connection will attempt to reconnect to the pool with a new connection. Transactions in progress remain in an "opened" state (even though the actual transaction is gone); these must be explicitly rolled back before a reconnect on this Connection can proceed. 
This is to prevent applications from accidentally continuing their transactional operations in a non-transactional state. """ if self.invalidated: return if self.closed: raise exc.ResourceClosedError("This Connection is closed") if self._connection_is_valid: self.__connection.invalidate(exception) del self.__connection self.__invalid = True def detach(self): """Detach the underlying DB-API connection from its connection pool. This Connection instance will remain useable. When closed, the DB-API connection will be literally closed and not returned to its pool. The pool will typically lazily create a new connection to replace the detached connection. This method can be used to insulate the rest of an application from a modified state on a connection (such as a transaction isolation level or similar). Also see :class:`~sqlalchemy.interfaces.PoolListener` for a mechanism to modify connection state when connections leave and return to their connection pool. """ self.__connection.detach() def begin(self): """Begin a transaction and return a transaction handle. The returned object is an instance of :class:`.Transaction`. This object represents the "scope" of the transaction, which completes when either the :meth:`.Transaction.rollback` or :meth:`.Transaction.commit` method is called. Nested calls to :meth:`.begin` on the same :class:`.Connection` will return new :class:`.Transaction` objects that represent an emulated transaction within the scope of the enclosing transaction, that is:: trans = conn.begin() # outermost transaction trans2 = conn.begin() # "nested" trans2.commit() # does nothing trans.commit() # actually commits Calls to :meth:`.Transaction.commit` only have an effect when invoked via the outermost :class:`.Transaction` object, though the :meth:`.Transaction.rollback` method of any of the :class:`.Transaction` objects will roll back the transaction. 
See also: :meth:`.Connection.begin_nested` - use a SAVEPOINT :meth:`.Connection.begin_twophase` - use a two phase /XID transaction :meth:`.Engine.begin` - context manager available from :class:`.Engine`. """ if self.__transaction is None: self.__transaction = RootTransaction(self) return self.__transaction else: return Transaction(self, self.__transaction) def begin_nested(self): """Begin a nested transaction and return a transaction handle. The returned object is an instance of :class:`.NestedTransaction`. Nested transactions require SAVEPOINT support in the underlying database. Any transaction in the hierarchy may ``commit`` and ``rollback``, however the outermost transaction still controls the overall ``commit`` or ``rollback`` of the transaction of a whole. See also :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`. """ if self.__transaction is None: self.__transaction = RootTransaction(self) else: self.__transaction = NestedTransaction(self, self.__transaction) return self.__transaction def begin_twophase(self, xid=None): """Begin a two-phase or XA transaction and return a transaction handle. The returned object is an instance of :class:`.TwoPhaseTransaction`, which in addition to the methods provided by :class:`.Transaction`, also provides a :meth:`~.TwoPhaseTransaction.prepare` method. :param xid: the two phase transaction id. If not supplied, a random id will be generated. See also :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`. 
""" if self.__transaction is not None: raise exc.InvalidRequestError( "Cannot start a two phase transaction when a transaction " "is already in progress.") if xid is None: xid = self.engine.dialect.create_xid(); self.__transaction = TwoPhaseTransaction(self, xid) return self.__transaction def recover_twophase(self): return self.engine.dialect.do_recover_twophase(self) def rollback_prepared(self, xid, recover=False): self.engine.dialect.do_rollback_twophase(self, xid, recover=recover) def commit_prepared(self, xid, recover=False): self.engine.dialect.do_commit_twophase(self, xid, recover=recover) def in_transaction(self): """Return True if a transaction is in progress.""" return self.__transaction is not None def _begin_impl(self): if self._echo: self.engine.logger.info("BEGIN (implicit)") if self._has_events: self.engine.dispatch.begin(self) try: self.engine.dialect.do_begin(self.connection) except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) raise def _rollback_impl(self): if self._has_events: self.engine.dispatch.rollback(self) if self._still_open_and_connection_is_valid: if self._echo: self.engine.logger.info("ROLLBACK") try: self.engine.dialect.do_rollback(self.connection) self.__transaction = None except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) raise else: self.__transaction = None def _commit_impl(self): if self._has_events: self.engine.dispatch.commit(self) if self._echo: self.engine.logger.info("COMMIT") try: self.engine.dialect.do_commit(self.connection) self.__transaction = None except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) raise def _savepoint_impl(self, name=None): if self._has_events: self.engine.dispatch.savepoint(self, name) if name is None: self.__savepoint_seq += 1 name = 'sa_savepoint_%s' % self.__savepoint_seq if self._still_open_and_connection_is_valid: self.engine.dialect.do_savepoint(self, name) return name def _rollback_to_savepoint_impl(self, name, context): 
if self._has_events: self.engine.dispatch.rollback_savepoint(self, name, context) if self._still_open_and_connection_is_valid: self.engine.dialect.do_rollback_to_savepoint(self, name) self.__transaction = context def _release_savepoint_impl(self, name, context): if self._has_events: self.engine.dispatch.release_savepoint(self, name, context) if self._still_open_and_connection_is_valid: self.engine.dialect.do_release_savepoint(self, name) self.__transaction = context def _begin_twophase_impl(self, xid): if self._has_events: self.engine.dispatch.begin_twophase(self, xid) if self._still_open_and_connection_is_valid: self.engine.dialect.do_begin_twophase(self, xid) def _prepare_twophase_impl(self, xid): if self._has_events: self.engine.dispatch.prepare_twophase(self, xid) if self._still_open_and_connection_is_valid: assert isinstance(self.__transaction, TwoPhaseTransaction) self.engine.dialect.do_prepare_twophase(self, xid) def _rollback_twophase_impl(self, xid, is_prepared): if self._has_events: self.engine.dispatch.rollback_twophase(self, xid, is_prepared) if self._still_open_and_connection_is_valid: assert isinstance(self.__transaction, TwoPhaseTransaction) self.engine.dialect.do_rollback_twophase(self, xid, is_prepared) self.__transaction = None def _commit_twophase_impl(self, xid, is_prepared): if self._has_events: self.engine.dispatch.commit_twophase(self, xid, is_prepared) if self._still_open_and_connection_is_valid: assert isinstance(self.__transaction, TwoPhaseTransaction) self.engine.dialect.do_commit_twophase(self, xid, is_prepared) self.__transaction = None def _autorollback(self): if not self.in_transaction(): self._rollback_impl() def close(self): """Close this :class:`.Connection`. This results in a release of the underlying database resources, that is, the DBAPI connection referenced internally. 
        The DBAPI connection is typically restored
        back to the connection-holding :class:`.Pool` referenced
        by the :class:`.Engine` that produced this
        :class:`.Connection`. Any transactional state present on
        the DBAPI connection is also unconditionally released via
        the DBAPI connection's ``rollback()`` method, regardless
        of any :class:`.Transaction` object that may be
        outstanding with regards to this :class:`.Connection`.

        After :meth:`~.Connection.close` is called, the
        :class:`.Connection` is permanently in a closed state,
        and will allow no further operations.

        """
        try:
            conn = self.__connection
        except AttributeError:
            # already closed; __connection was deleted on a prior close()
            return
        if not self.__branch:
            conn.close()
        self.__invalid = False
        # deleting __connection makes later access raise AttributeError,
        # which other methods use to detect the closed state
        del self.__connection
        self.__transaction = None

    def scalar(self, object, *multiparams, **params):
        """Executes and returns the first column of the first row.

        The underlying result/cursor is closed after execution.
        """

        return self.execute(object, *multiparams, **params).scalar()

    def execute(self, object, *multiparams, **params):
        """Executes a SQL statement construct and returns a
        :class:`.ResultProxy`.

        :param object: The statement to be executed.  May be
         one of:

         * a plain string
         * any :class:`.ClauseElement` construct that is also
           a subclass of :class:`.Executable`, such as a
           :func:`~.expression.select` construct
         * a :class:`.FunctionElement`, such as that generated
           by :attr:`.func`, will be automatically wrapped in
           a SELECT statement, which is then executed.
         * a :class:`.DDLElement` object
         * a :class:`.DefaultGenerator` object
         * a :class:`.Compiled` object

        :param \*multiparams/\**params: represent bound parameter
         values to be used in the execution.

         Typically, the format is either a collection of one or
         more dictionaries passed to \*multiparams::

             conn.execute(
                 table.insert(),
                 {"id":1, "value":"v1"},
                 {"id":2, "value":"v2"}
             )

         ...or individual key/values interpreted by \**params::

             conn.execute(
                 table.insert(), id=1, value="v1"
             )

         In the case that a plain SQL string is passed, and the
         underlying DBAPI accepts positional bind parameters, a
         collection of tuples or individual values in \*multiparams may
         be passed::

             conn.execute(
                 "INSERT INTO table (id, value) VALUES (?, ?)",
                 (1, "v1"), (2, "v2")
             )

             conn.execute(
                 "INSERT INTO table (id, value) VALUES (?, ?)",
                 1, "v1"
             )

         Note above, the usage of a question mark "?" or other
         symbol is contingent upon the "paramstyle" accepted by the DBAPI
         in use, which may be any of "qmark", "named", "pyformat", "format",
         "numeric".   See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
         for details on paramstyle.

         To execute a textual SQL statement which uses bound parameters in a
         DBAPI-agnostic way, use the :func:`~.expression.text` construct.

        """
        # Dispatch on the statement's type via the class-level ``executors``
        # map, walking the MRO so subclasses of executable types also match.
        for c in type(object).__mro__:
            if c in Connection.executors:
                return Connection.executors[c](
                                                self,
                                                object,
                                                multiparams,
                                                params)
        else:
            # for/else: no entry in the MRO matched an executor
            raise exc.InvalidRequestError(
                                "Unexecutable object type: %s" %
                                type(object))

    def __distill_params(self, multiparams, params):
        """Given arguments from the calling form *multiparams, **params,
        return a list of bind parameter structures, usually a
        list of dictionaries.

        In the case of 'raw' execution which accepts positional
        parameters, it may be a list of tuples or lists.
""" if not multiparams: if params: return [params] else: return [] elif len(multiparams) == 1: zero = multiparams[0] if isinstance(zero, (list, tuple)): if not zero or hasattr(zero[0], '__iter__'): return zero else: return [zero] elif hasattr(zero, 'keys'): return [zero] else: return [[zero]] else: if hasattr(multiparams[0], '__iter__'): return multiparams else: return [multiparams] def _execute_function(self, func, multiparams, params): """Execute a sql.FunctionElement object.""" return self._execute_clauseelement(func.select(), multiparams, params) def _execute_default(self, default, multiparams, params): """Execute a schema.ColumnDefault object.""" if self._has_events: for fn in self.engine.dispatch.before_execute: default, multiparams, params = \ fn(self, default, multiparams, params) try: try: conn = self.__connection except AttributeError: conn = self._revalidate_connection() dialect = self.dialect ctx = dialect.execution_ctx_cls._init_default( dialect, self, conn) except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) raise ret = ctx._exec_default(default, None) if self.should_close_with_result: self.close() if self._has_events: self.engine.dispatch.after_execute(self, default, multiparams, params, ret) return ret def _execute_ddl(self, ddl, multiparams, params): """Execute a schema.DDL object.""" if self._has_events: for fn in self.engine.dispatch.before_execute: ddl, multiparams, params = \ fn(self, ddl, multiparams, params) dialect = self.dialect compiled = ddl.compile(dialect=dialect) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_ddl, compiled, None, compiled ) if self._has_events: self.engine.dispatch.after_execute(self, ddl, multiparams, params, ret) return ret def _execute_clauseelement(self, elem, multiparams, params): """Execute a sql.ClauseElement object.""" if self._has_events: for fn in self.engine.dispatch.before_execute: elem, multiparams, params = \ fn(self, elem, multiparams, params) 
distilled_params = self.__distill_params(multiparams, params) if distilled_params: keys = distilled_params[0].keys() else: keys = [] dialect = self.dialect if 'compiled_cache' in self._execution_options: key = dialect, elem, tuple(keys), len(distilled_params) > 1 if key in self._execution_options['compiled_cache']: compiled_sql = self._execution_options['compiled_cache'][key] else: compiled_sql = elem.compile( dialect=dialect, column_keys=keys, inline=len(distilled_params) > 1) self._execution_options['compiled_cache'][key] = compiled_sql else: compiled_sql = elem.compile( dialect=dialect, column_keys=keys, inline=len(distilled_params) > 1) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_compiled, compiled_sql, distilled_params, compiled_sql, distilled_params ) if self._has_events: self.engine.dispatch.after_execute(self, elem, multiparams, params, ret) return ret def _execute_compiled(self, compiled, multiparams, params): """Execute a sql.Compiled object.""" if self._has_events: for fn in self.engine.dispatch.before_execute: compiled, multiparams, params = \ fn(self, compiled, multiparams, params) dialect = self.dialect parameters=self.__distill_params(multiparams, params) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_compiled, compiled, parameters, compiled, parameters ) if self._has_events: self.engine.dispatch.after_execute(self, compiled, multiparams, params, ret) return ret def _execute_text(self, statement, multiparams, params): """Execute a string SQL statement.""" if self._has_events: for fn in self.engine.dispatch.before_execute: statement, multiparams, params = \ fn(self, statement, multiparams, params) dialect = self.dialect parameters = self.__distill_params(multiparams, params) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_statement, statement, parameters, statement, parameters ) if self._has_events: self.engine.dispatch.after_execute(self, statement, multiparams, params, ret) 
        return ret

    def _execute_context(self, dialect, constructor,
                                    statement, parameters,
                                    *args):
        """Create an :class:`.ExecutionContext` and execute, returning
        a :class:`.ResultProxy`.

        This is the common back end for all statement-execution paths:
        construct the context, run pre_exec/cursor execute/post_exec,
        fire cursor-level events, and produce the result proxy.
        """

        try:
            try:
                conn = self.__connection
            except AttributeError:
                # connection was invalidated; transparently reconnect
                conn = self._revalidate_connection()

            context = constructor(dialect, self, conn, *args)
        except Exception, e:
            self._handle_dbapi_exception(e,
                        str(statement), parameters,
                        None, None)
            raise

        if context.compiled:
            context.pre_exec()

        cursor, statement, parameters = context.cursor, \
                                        context.statement, \
                                        context.parameters

        if not context.executemany:
            # single execution: unwrap the one-element parameter list
            parameters = parameters[0]

        if self._has_events:
            # before_cursor_execute listeners may rewrite statement/params
            for fn in self.engine.dispatch.before_cursor_execute:
                statement, parameters = \
                            fn(self, cursor, statement, parameters,
                                        context, context.executemany)

        if self._echo:
            self.engine.logger.info(statement)
            self.engine.logger.info("%r",
                    sql_util._repr_params(parameters, batches=10))
        try:
            if context.executemany:
                self.dialect.do_executemany(
                                    cursor,
                                    statement,
                                    parameters,
                                    context)
            elif not parameters and context.no_parameters:
                self.dialect.do_execute_no_params(
                                    cursor,
                                    statement,
                                    context)
            else:
                self.dialect.do_execute(
                                    cursor,
                                    statement,
                                    parameters,
                                    context)
        except Exception, e:
            self._handle_dbapi_exception(
                                e,
                                statement,
                                parameters,
                                cursor,
                                context)
            raise

        if self._has_events:
            self.engine.dispatch.after_cursor_execute(self, cursor,
                                                statement,
                                                parameters,
                                                context,
                                                context.executemany)

        if context.compiled:
            context.post_exec()

            if context.isinsert and not context.executemany:
                context.post_insert()

        # create a resultproxy, get rowcount/implicit RETURNING
        # rows, close cursor if no further results pending
        result = context.get_result_proxy()

        if context.isinsert:
            if context._is_implicit_returning:
                context._fetch_implicit_returning(result)
                result.close(_autoclose_connection=False)
            elif not context._is_explicit_returning:
                result.close(_autoclose_connection=False)
        elif result._metadata is None:
            # no results, get rowcount
            # (which requires open cursor on some drivers
            # such as kintersbasdb, mxodbc),
            result.rowcount
            result.close(_autoclose_connection=False)

        if self.__transaction is None and context.should_autocommit:
            self._commit_impl()

        if result.closed and self.should_close_with_result:
            self.close()

        return result

    def _cursor_execute(self, cursor, statement, parameters):
        """Execute a statement + params on the given cursor.

        Adds appropriate logging and exception handling.

        This method is used by DefaultDialect for special-case
        executions, such as for sequences and column defaults.
        The path of statement execution in the majority of cases
        terminates at _execute_context().

        """
        if self._echo:
            self.engine.logger.info(statement)
            self.engine.logger.info("%r", parameters)
        try:
            self.dialect.do_execute(
                                cursor,
                                statement,
                                parameters)
        except Exception, e:
            self._handle_dbapi_exception(
                                e,
                                statement,
                                parameters,
                                cursor,
                                None)
            raise

    def _safe_close_cursor(self, cursor):
        """Close the given cursor, catching exceptions
        and turning into log warnings.

        """
        try:
            cursor.close()
        except Exception, e:
            try:
                ex_text = str(e)
            except TypeError:
                ex_text = repr(e)
            self.connection._logger.warn("Error closing cursor: %s", ex_text)

            # never swallow interpreter-exit signals
            if isinstance(e, (SystemExit, KeyboardInterrupt)):
                raise

    def _handle_dbapi_exception(self,
                                    e,
                                    statement,
                                    parameters,
                                    cursor,
                                    context):
        # Central exception rewriter: wraps DBAPI errors in DBAPIError,
        # detects disconnects, and performs cleanup/autorollback.
        if getattr(self, '_reentrant_error', False):
            # an error was raised while handling a previous error;
            # re-wrap without running the cleanup path again
            # Py3K
            #raise exc.DBAPIError.instance(statement, parameters, e,
            #                               self.dialect.dbapi.Error) from e
            # Py2K
            raise exc.DBAPIError.instance(statement,
                                            parameters,
                                            e,
                                            self.dialect.dbapi.Error), \
                                            None, sys.exc_info()[2]
            # end Py2K
        self._reentrant_error = True
        try:
            # non-DBAPI error - if we already got a context,
            # or theres no string statement, don't wrap it
            should_wrap = isinstance(e, self.dialect.dbapi.Error) or \
                (statement is not None and context is None)

            if should_wrap and context:
                context.handle_dbapi_exception(e)

            is_disconnect = isinstance(e, self.dialect.dbapi.Error) and \
                self.dialect.is_disconnect(e, self.__connection, cursor)
            if is_disconnect:
                # lost connection: invalidate this one and replace the
                # whole pool, since its other connections are suspect too
                self.invalidate(e)
                self.engine.dispose()
            else:
                if cursor:
                    self._safe_close_cursor(cursor)
                self._autorollback()
                if self.should_close_with_result:
                    self.close()

            if not should_wrap:
                return

            # Py3K
            #raise exc.DBAPIError.instance(
            #                        statement,
            #                        parameters,
            #                        e,
            #                        self.dialect.dbapi.Error,
            #                        connection_invalidated=is_disconnect) \
            #                        from e
            # Py2K
            raise exc.DBAPIError.instance(
                                    statement,
                                    parameters,
                                    e,
                                    self.dialect.dbapi.Error,
                                    connection_invalidated=is_disconnect), \
                                    None, sys.exc_info()[2]
            # end Py2K

        finally:
            del self._reentrant_error

    # poor man's multimethod/generic function thingy:
    # maps statement type -> Connection method, consulted by execute()
    executors = {
        expression.FunctionElement: _execute_function,
        expression.ClauseElement: _execute_clauseelement,
        Compiled: _execute_compiled,
        schema.SchemaItem: _execute_default,
        schema.DDLElement: _execute_ddl,
        basestring: _execute_text
    }

    @util.deprecated("0.7", "Use the create() method on the given schema "
                            "object directly, i.e. :meth:`.Table.create`, "
                            ":meth:`.Index.create`, :meth:`.MetaData.create_all`")
    def create(self, entity, **kwargs):
        """Emit CREATE statements for the given schema entity."""

        return self.engine.create(entity, connection=self, **kwargs)

    @util.deprecated("0.7", "Use the drop() method on the given schema "
                            "object directly, i.e. :meth:`.Table.drop`, "
                            ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`")
    def drop(self, entity, **kwargs):
        """Emit DROP statements for the given schema entity."""

        return self.engine.drop(entity, connection=self, **kwargs)

    @util.deprecated("0.7", "Use autoload=True with :class:`.Table`, "
                            "or use the :class:`.Inspector` object.")
    def reflecttable(self, table, include_columns=None):
        """Load table description from the database.

        Given a :class:`.Table` object, reflect its columns and
        properties from the database, populating the given :class:`.Table`
        object with attributes.  If include_columns (a list or
        set) is specified, limit the autoload to the given column
        names.

        The default implementation uses the
        :class:`.Inspector` interface to
        provide the output, building upon the granular table/column/
        constraint etc. methods of :class:`.Dialect`.

        """
        return self.engine.reflecttable(table, self, include_columns)

    def default_schema_name(self):
        # Delegate to the dialect for the database's default schema name.
        return self.engine.dialect.get_default_schema_name(self)

    def transaction(self, callable_, *args, **kwargs):
        """Execute the given function within a transaction boundary.

        The function is passed this :class:`.Connection`
        as the first argument, followed by the given \*args and \**kwargs,
        e.g.::

            def do_something(conn, x, y):
                conn.execute("some statement", {'x':x, 'y':y})

            conn.transaction(do_something, 5, 10)

        The operations inside the function are all invoked within the
        context of a single :class:`.Transaction`.
        Upon success, the transaction is committed.  If an
        exception is raised, the transaction is rolled back
        before propagating the exception.

        .. note::

           The :meth:`.transaction` method is superseded by
           the usage of the Python ``with:`` statement, which can
           be used with :meth:`.Connection.begin`::

               with conn.begin():
                   conn.execute("some statement", {'x':5, 'y':10})

           As well as with :meth:`.Engine.begin`::

               with engine.begin() as conn:
                   conn.execute("some statement", {'x':5, 'y':10})

        See also:

            :meth:`.Engine.begin` - engine-level transactional
            context

            :meth:`.Engine.transaction` - engine-level version of
            :meth:`.Connection.transaction`

        """

        trans = self.begin()
        try:
            ret = self.run_callable(callable_, *args, **kwargs)
            trans.commit()
            return ret
        except:
            # roll back on any error, then re-raise unchanged
            trans.rollback()
            raise

    def run_callable(self, callable_, *args, **kwargs):
        """Given a callable object or function, execute it, passing
        a :class:`.Connection` as the first argument.

        The given \*args and \**kwargs are passed subsequent
        to the :class:`.Connection` argument.

        This function, along with :meth:`.Engine.run_callable`,
        allows a function to be run with a :class:`.Connection`
        or :class:`.Engine` object without the need to know
        which one is being dealt with.
""" return callable_(self, *args, **kwargs) def _run_visitor(self, visitorcallable, element, **kwargs): visitorcallable(self.dialect, self, **kwargs).traverse_single(element) class Transaction(object): """Represent a database transaction in progress. The :class:`.Transaction` object is procured by calling the :meth:`~.Connection.begin` method of :class:`.Connection`:: from sqlalchemy import create_engine engine = create_engine("postgresql://scott:tiger@localhost/test") connection = engine.connect() trans = connection.begin() connection.execute("insert into x (a, b) values (1, 2)") trans.commit() The object provides :meth:`.rollback` and :meth:`.commit` methods in order to control transaction boundaries. It also implements a context manager interface so that the Python ``with`` statement can be used with the :meth:`.Connection.begin` method:: with connection.begin(): connection.execute("insert into x (a, b) values (1, 2)") The Transaction object is **not** threadsafe. See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`, :meth:`.Connection.begin_nested`. .. index:: single: thread safety; Transaction """ def __init__(self, connection, parent): self.connection = connection self._parent = parent or self self.is_active = True def close(self): """Close this :class:`.Transaction`. If this transaction is the base transaction in a begin/commit nesting, the transaction will rollback(). Otherwise, the method returns. This is used to cancel a Transaction without affecting the scope of an enclosing transaction. """ if not self._parent.is_active: return if self._parent is self: self.rollback() def rollback(self): """Roll back this :class:`.Transaction`. 
""" if not self._parent.is_active: return self._do_rollback() self.is_active = False def _do_rollback(self): self._parent.rollback() def commit(self): """Commit this :class:`.Transaction`.""" if not self._parent.is_active: raise exc.InvalidRequestError("This transaction is inactive") self._do_commit() self.is_active = False def _do_commit(self): pass def __enter__(self): return self def __exit__(self, type, value, traceback): if type is None and self.is_active: try: self.commit() except: self.rollback() raise else: self.rollback() class RootTransaction(Transaction): def __init__(self, connection): super(RootTransaction, self).__init__(connection, None) self.connection._begin_impl() def _do_rollback(self): if self.is_active: self.connection._rollback_impl() def _do_commit(self): if self.is_active: self.connection._commit_impl() class NestedTransaction(Transaction): """Represent a 'nested', or SAVEPOINT transaction. A new :class:`.NestedTransaction` object may be procured using the :meth:`.Connection.begin_nested` method. The interface is the same as that of :class:`.Transaction`. """ def __init__(self, connection, parent): super(NestedTransaction, self).__init__(connection, parent) self._savepoint = self.connection._savepoint_impl() def _do_rollback(self): if self.is_active: self.connection._rollback_to_savepoint_impl( self._savepoint, self._parent) def _do_commit(self): if self.is_active: self.connection._release_savepoint_impl( self._savepoint, self._parent) class TwoPhaseTransaction(Transaction): """Represent a two-phase transaction. A new :class:`.TwoPhaseTransaction` object may be procured using the :meth:`.Connection.begin_twophase` method. The interface is the same as that of :class:`.Transaction` with the addition of the :meth:`prepare` method. 
""" def __init__(self, connection, xid): super(TwoPhaseTransaction, self).__init__(connection, None) self._is_prepared = False self.xid = xid self.connection._begin_twophase_impl(self.xid) def prepare(self): """Prepare this :class:`.TwoPhaseTransaction`. After a PREPARE, the transaction can be committed. """ if not self._parent.is_active: raise exc.InvalidRequestError("This transaction is inactive") self.connection._prepare_twophase_impl(self.xid) self._is_prepared = True def _do_rollback(self): self.connection._rollback_twophase_impl(self.xid, self._is_prepared) def _do_commit(self): self.connection._commit_twophase_impl(self.xid, self._is_prepared) class Engine(Connectable, log.Identified): """ Connects a :class:`~sqlalchemy.pool.Pool` and :class:`~sqlalchemy.engine.base.Dialect` together to provide a source of database connectivity and behavior. An :class:`.Engine` object is instantiated publically using the :func:`~sqlalchemy.create_engine` function. See also: :ref:`engines_toplevel` :ref:`connections_toplevel` """ _execution_options = util.immutabledict() _has_events = False _connection_cls = Connection def __init__(self, pool, dialect, url, logging_name=None, echo=None, proxy=None, execution_options=None ): self.pool = pool self.url = url self.dialect = dialect if logging_name: self.logging_name = logging_name self.echo = echo self.engine = self log.instance_logger(self, echoflag=echo) if proxy: interfaces.ConnectionProxy._adapt_listener(self, proxy) if execution_options: if 'isolation_level' in execution_options: raise exc.ArgumentError( "'isolation_level' execution option may " "only be specified on Connection.execution_options(). " "To set engine-wide isolation level, " "use the isolation_level argument to create_engine()." ) self.update_execution_options(**execution_options) dispatch = event.dispatcher(events.ConnectionEvents) def update_execution_options(self, **opt): """Update the default execution_options dictionary of this :class:`.Engine`. 
The given keys/values in \**opt are added to the default execution options that will be used for all connections. The initial contents of this dictionary can be sent via the ``execution_options`` paramter to :func:`.create_engine`. See :meth:`.Connection.execution_options` for more details on execution options. """ self._execution_options = \ self._execution_options.union(opt) @property def name(self): """String name of the :class:`~sqlalchemy.engine.Dialect` in use by this ``Engine``.""" return self.dialect.name @property def driver(self): """Driver name of the :class:`~sqlalchemy.engine.Dialect` in use by this ``Engine``.""" return self.dialect.driver echo = log.echo_property() def __repr__(self): return 'Engine(%s)' % str(self.url) def dispose(self): """Dispose of the connection pool used by this :class:`.Engine`. A new connection pool is created immediately after the old one has been disposed. This new pool, like all SQLAlchemy connection pools, does not make any actual connections to the database until one is first requested. This method has two general use cases: * When a dropped connection is detected, it is assumed that all connections held by the pool are potentially dropped, and the entire pool is replaced. * An application may want to use :meth:`dispose` within a test suite that is creating multiple engines. It is critical to note that :meth:`dispose` does **not** guarantee that the application will release all open database connections - only those connections that are checked into the pool are closed. Connections which remain checked out or have been detached from the engine are not affected. """ self.pool.dispose() self.pool = self.pool.recreate() @util.deprecated("0.7", "Use the create() method on the given schema " "object directly, i.e. 
:meth:`.Table.create`, " ":meth:`.Index.create`, :meth:`.MetaData.create_all`") def create(self, entity, connection=None, **kwargs): """Emit CREATE statements for the given schema entity.""" from sqlalchemy.engine import ddl self._run_visitor(ddl.SchemaGenerator, entity, connection=connection, **kwargs) @util.deprecated("0.7", "Use the drop() method on the given schema " "object directly, i.e. :meth:`.Table.drop`, " ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") def drop(self, entity, connection=None, **kwargs): """Emit DROP statements for the given schema entity.""" from sqlalchemy.engine import ddl self._run_visitor(ddl.SchemaDropper, entity, connection=connection, **kwargs) def _execute_default(self, default): connection = self.contextual_connect() try: return connection._execute_default(default, (), {}) finally: connection.close() @property @util.deprecated("0.7", "Use :attr:`~sqlalchemy.sql.expression.func` to create function constructs.") def func(self): return expression._FunctionGenerator(bind=self) @util.deprecated("0.7", "Use :func:`.expression.text` to create text constructs.") def text(self, text, *args, **kwargs): """Return a :func:`~sqlalchemy.sql.expression.text` construct, bound to this engine. 
        This is equivalent to::

            text("SELECT * FROM table", bind=engine)

        """

        return expression.text(text, bind=self, *args, **kwargs)

    def _run_visitor(self, visitorcallable, element,
                                    connection=None, **kwargs):
        # Run a schema visitor on a supplied or freshly-acquired connection;
        # only close the connection if we opened it ourselves.
        if connection is None:
            conn = self.contextual_connect(close_with_result=False)
        else:
            conn = connection
        try:
            conn._run_visitor(visitorcallable, element, **kwargs)
        finally:
            if connection is None:
                conn.close()

    class _trans_ctx(object):
        # context manager returned by Engine.begin(): commits on clean
        # exit, rolls back on exception
        def __init__(self, conn, transaction, close_with_result):
            self.conn = conn
            self.transaction = transaction
            self.close_with_result = close_with_result

        def __enter__(self):
            return self.conn

        def __exit__(self, type, value, traceback):
            if type is not None:
                self.transaction.rollback()
            else:
                self.transaction.commit()
            if not self.close_with_result:
                self.conn.close()

    def begin(self, close_with_result=False):
        """Return a context manager delivering a :class:`.Connection`
        with a :class:`.Transaction` established.

        E.g.::

            with engine.begin() as conn:
                conn.execute("insert into table (x, y, z) values (1, 2, 3)")
                conn.execute("my_special_procedure(5)")

        Upon successful operation, the :class:`.Transaction`
        is committed.  If an error is raised, the :class:`.Transaction`
        is rolled back.

        The ``close_with_result`` flag is normally ``False``, and indicates
        that the :class:`.Connection` will be closed when the operation
        is complete.   When set to ``True``, it indicates the
        :class:`.Connection` is in "single use" mode, where the
        :class:`.ResultProxy` returned by the first call to
        :meth:`.Connection.execute` will close the :class:`.Connection` when
        that :class:`.ResultProxy` has exhausted all result rows.

        New in 0.7.6.

        See also:

        :meth:`.Engine.connect` - procure a :class:`.Connection` from
        an :class:`.Engine`.

        :meth:`.Connection.begin` - start a :class:`.Transaction`
        for a particular :class:`.Connection`.

        """
        conn = self.contextual_connect(close_with_result=close_with_result)
        trans = conn.begin()
        return Engine._trans_ctx(conn, trans, close_with_result)

    def transaction(self, callable_, *args, **kwargs):
        """Execute the given function within a transaction boundary.

        The function is passed a :class:`.Connection` newly procured
        from :meth:`.Engine.contextual_connect` as the first argument,
        followed by the given \*args and \**kwargs.

        e.g.::

            def do_something(conn, x, y):
                conn.execute("some statement", {'x':x, 'y':y})

            engine.transaction(do_something, 5, 10)

        The operations inside the function are all invoked within the
        context of a single :class:`.Transaction`.
        Upon success, the transaction is committed.  If an
        exception is raised, the transaction is rolled back
        before propagating the exception.

        .. note::

           The :meth:`.transaction` method is superseded by
           the usage of the Python ``with:`` statement, which can
           be used with :meth:`.Engine.begin`::

               with engine.begin() as conn:
                   conn.execute("some statement", {'x':5, 'y':10})

        See also:

            :meth:`.Engine.begin` - engine-level transactional
            context

            :meth:`.Connection.transaction` - connection-level version of
            :meth:`.Engine.transaction`

        """

        conn = self.contextual_connect()
        try:
            return conn.transaction(callable_, *args, **kwargs)
        finally:
            conn.close()

    def run_callable(self, callable_, *args, **kwargs):
        """Given a callable object or function, execute it, passing
        a :class:`.Connection` as the first argument.

        The given \*args and \**kwargs are passed subsequent
        to the :class:`.Connection` argument.

        This function, along with :meth:`.Connection.run_callable`,
        allows a function to be run with a :class:`.Connection`
        or :class:`.Engine` object without the need to know
        which one is being dealt with.

        """
        conn = self.contextual_connect()
        try:
            return conn.run_callable(callable_, *args, **kwargs)
        finally:
            conn.close()

    def execute(self, statement, *multiparams, **params):
        """Executes the given construct and returns a :class:`.ResultProxy`.

        The arguments are the same as those used by
        :meth:`.Connection.execute`.

        Here, a :class:`.Connection` is acquired using the
        :meth:`~.Engine.contextual_connect` method, and the statement executed
        with that connection.  The returned :class:`.ResultProxy` is flagged
        such that when the :class:`.ResultProxy` is exhausted and its
        underlying cursor is closed, the :class:`.Connection` created here
        will also be closed, which allows its associated DBAPI connection
        resource to be returned to the connection pool.

        """

        connection = self.contextual_connect(close_with_result=True)
        return connection.execute(statement, *multiparams, **params)

    def scalar(self, statement, *multiparams, **params):
        # Connectionless convenience: first column of the first row.
        return self.execute(statement, *multiparams, **params).scalar()

    def _execute_clauseelement(self, elem, multiparams=None, params=None):
        connection = self.contextual_connect(close_with_result=True)
        return connection._execute_clauseelement(elem, multiparams, params)

    def _execute_compiled(self, compiled, multiparams, params):
        connection = self.contextual_connect(close_with_result=True)
        return connection._execute_compiled(compiled, multiparams, params)

    def connect(self, **kwargs):
        """Return a new :class:`.Connection` object.

        The :class:`.Connection` object is a facade that uses a DBAPI
        connection internally in order to communicate with the database.
        This connection is procured from the connection-holding :class:`.Pool`
        referenced by this :class:`.Engine`. When the
        :meth:`~.Connection.close` method of the :class:`.Connection` object
        is called, the underlying DBAPI connection is then returned to the
        connection pool, where it may be used again in a subsequent call to
        :meth:`~.Engine.connect`.

        """

        return self._connection_cls(self, **kwargs)

    def contextual_connect(self, close_with_result=False, **kwargs):
        """Return a :class:`.Connection` object which may be part of some
        ongoing context.

        By default, this method does the same thing as :meth:`.Engine.connect`.
        Subclasses of :class:`.Engine` may override this method
        to provide contextual behavior.

        :param close_with_result: When True, the first :class:`.ResultProxy`
          created by the :class:`.Connection` will call the
          :meth:`.Connection.close` method of that connection as soon as any
          pending result rows are exhausted. This is used to supply the
          "connectionless execution" behavior provided by the
          :meth:`.Engine.execute` method.

        """

        return self._connection_cls(self,
                                    self.pool.connect(),
                                    close_with_result=close_with_result,
                                    **kwargs)

    def table_names(self, schema=None, connection=None):
        """Return a list of all table names available in the database.

        :param schema: Optional, retrieve names from a non-default schema.

        :param connection: Optional, use a specified connection. Default is
          the ``contextual_connect`` for this ``Engine``.
        """

        if connection is None:
            conn = self.contextual_connect()
        else:
            conn = connection
        if not schema:
            schema = self.dialect.default_schema_name
        try:
            return self.dialect.get_table_names(conn, schema)
        finally:
            # only close a connection we opened ourselves
            if connection is None:
                conn.close()

    @util.deprecated("0.7", "Use autoload=True with :class:`.Table`, "
                            "or use the :class:`.Inspector` object.")
    def reflecttable(self, table, connection=None, include_columns=None):
        """Load table description from the database.

        Uses the given :class:`.Connection`, or if None produces
        its own :class:`.Connection`, and passes the ``table``
        and ``include_columns`` arguments onto that
        :class:`.Connection` object's :meth:`.Connection.reflecttable`
        method.  The :class:`.Table` object is then populated
        with new attributes.

        """
        if connection is None:
            conn = self.contextual_connect()
        else:
            conn = connection
        try:
            self.dialect.reflecttable(conn, table, include_columns)
        finally:
            if connection is None:
                conn.close()

    def has_table(self, table_name, schema=None):
        # True if the given table exists; delegates to the dialect.
        return self.run_callable(self.dialect.has_table, table_name, schema)

    def raw_connection(self):
        """Return a "raw" DBAPI connection from the connection pool.

        The returned object is a proxied version of the DBAPI
        connection object used by the underlying driver in use.
        The object will have all the same behavior as the real DBAPI
        connection, except that its ``close()`` method will result in the
        connection being returned to the pool, rather than being closed
        for real.

        This method provides direct DBAPI connection access for
        special situations.  In most situations, the :class:`.Connection`
        object should be used, which is procured using the
        :meth:`.Engine.connect` method.

        """

        return self.pool.unique_connection()


# This reconstructor is necessary so that pickles with the C extension or
# without use the same Binary format.
try:
    # We need a different reconstructor on the C extension so that we can
    # add extra checks that fields have correctly been initialized by
    # __setstate__.
    from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor

    # The extra function embedding is needed so that the
    # reconstructor function has the same signature whether or not
    # the extension is present.
def rowproxy_reconstructor(cls, state): return safe_rowproxy_reconstructor(cls, state) except ImportError: def rowproxy_reconstructor(cls, state): obj = cls.__new__(cls) obj.__setstate__(state) return obj try: from sqlalchemy.cresultproxy import BaseRowProxy except ImportError: class BaseRowProxy(object): __slots__ = ('_parent', '_row', '_processors', '_keymap') def __init__(self, parent, row, processors, keymap): """RowProxy objects are constructed by ResultProxy objects.""" self._parent = parent self._row = row self._processors = processors self._keymap = keymap def __reduce__(self): return (rowproxy_reconstructor, (self.__class__, self.__getstate__())) def values(self): """Return the values represented by this RowProxy as a list.""" return list(self) def __iter__(self): for processor, value in izip(self._processors, self._row): if processor is None: yield value else: yield processor(value) def __len__(self): return len(self._row) def __getitem__(self, key): try: processor, obj, index = self._keymap[key] except KeyError: processor, obj, index = self._parent._key_fallback(key) except TypeError: if isinstance(key, slice): l = [] for processor, value in izip(self._processors[key], self._row[key]): if processor is None: l.append(value) else: l.append(processor(value)) return tuple(l) else: raise if index is None: raise exc.InvalidRequestError( "Ambiguous column name '%s' in result set! " "try 'use_labels' option on select statement." % key) if processor is not None: return processor(self._row[index]) else: return self._row[index] def __getattr__(self, name): try: return self[name] except KeyError, e: raise AttributeError(e.args[0]) class RowProxy(BaseRowProxy): """Proxy values from a single cursor row. 
Mostly follows "ordered dictionary" behavior, mapping result values to the string-based column name, the integer position of the result in the row, as well as Column instances which can be mapped to the original Columns that produced this result set (for results that correspond to constructed SQL expressions). """ __slots__ = () def __contains__(self, key): return self._parent._has_key(self._row, key) def __getstate__(self): return { '_parent': self._parent, '_row': tuple(self) } def __setstate__(self, state): self._parent = parent = state['_parent'] self._row = state['_row'] self._processors = parent._processors self._keymap = parent._keymap __hash__ = None def __eq__(self, other): return other is self or other == tuple(self) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return repr(tuple(self)) def has_key(self, key): """Return True if this RowProxy contains the given key.""" return self._parent._has_key(self._row, key) def items(self): """Return a list of tuples, each tuple containing a key/value pair.""" # TODO: no coverage here return [(key, self[key]) for key in self.iterkeys()] def keys(self): """Return the list of keys as strings represented by this RowProxy.""" return self._parent.keys def iterkeys(self): return iter(self._parent.keys) def itervalues(self): return iter(self) try: # Register RowProxy with Sequence, # so sequence protocol is implemented from collections import Sequence Sequence.register(RowProxy) except ImportError: pass class ResultMetaData(object): """Handle cursor.description, applying additional info from an execution context.""" def __init__(self, parent, metadata): self._processors = processors = [] # We do not strictly need to store the processor in the key mapping, # though it is faster in the Python version (probably because of the # saved attribute lookup self._processors) self._keymap = keymap = {} self.keys = [] context = parent.context dialect = context.dialect typemap = dialect.dbapi_type_map 
translate_colname = dialect._translate_colname # high precedence key values. primary_keymap = {} for i, rec in enumerate(metadata): colname = rec[0] coltype = rec[1] if dialect.description_encoding: colname = dialect._description_decoder(colname) if translate_colname: colname, untranslated = translate_colname(colname) if context.result_map: try: name, obj, type_ = context.result_map[colname.lower()] except KeyError: name, obj, type_ = \ colname, None, typemap.get(coltype, types.NULLTYPE) else: name, obj, type_ = \ colname, None, typemap.get(coltype, types.NULLTYPE) processor = type_._cached_result_processor(dialect, coltype) processors.append(processor) rec = (processor, obj, i) # indexes as keys. This is only needed for the Python version of # RowProxy (the C version uses a faster path for integer indexes). primary_keymap[i] = rec # populate primary keymap, looking for conflicts. if primary_keymap.setdefault(name.lower(), rec) is not rec: # place a record that doesn't have the "index" - this # is interpreted later as an AmbiguousColumnError, # but only when actually accessed. Columns # colliding by name is not a problem if those names # aren't used; integer and ColumnElement access is always # unambiguous. primary_keymap[name.lower()] = (processor, obj, None) if dialect.requires_name_normalize: colname = dialect.normalize_name(colname) self.keys.append(colname) if obj: for o in obj: keymap[o] = rec if translate_colname and \ untranslated: keymap[untranslated] = rec # overwrite keymap values with those of the # high precedence keymap. keymap.update(primary_keymap) if parent._echo: context.engine.logger.debug( "Col %r", tuple(x[0] for x in metadata)) @util.pending_deprecation("0.8", "sqlite dialect uses " "_translate_colname() now") def _set_keymap_synonym(self, name, origname): """Set a synonym for the given name. Some dialects (SQLite at the moment) may use this to adjust the column names that are significant within a row. 
""" rec = (processor, obj, i) = self._keymap[origname.lower()] if self._keymap.setdefault(name, rec) is not rec: self._keymap[name] = (processor, obj, None) def _key_fallback(self, key, raiseerr=True): map = self._keymap result = None if isinstance(key, basestring): result = map.get(key.lower()) # fallback for targeting a ColumnElement to a textual expression # this is a rare use case which only occurs when matching text() # or colummn('name') constructs to ColumnElements, or after a # pickle/unpickle roundtrip elif isinstance(key, expression.ColumnElement): if key._label and key._label.lower() in map: result = map[key._label.lower()] elif hasattr(key, 'name') and key.name.lower() in map: # match is only on name. result = map[key.name.lower()] # search extra hard to make sure this # isn't a column/label name overlap. # this check isn't currently available if the row # was unpickled. if result is not None and \ result[1] is not None: for obj in result[1]: if key._compare_name_for_result(obj): break else: result = None if result is None: if raiseerr: raise exc.NoSuchColumnError( "Could not locate column in row for column '%s'" % expression._string_or_unprintable(key)) else: return None else: map[key] = result return result def _has_key(self, row, key): if key in self._keymap: return True else: return self._key_fallback(key, False) is not None def __getstate__(self): return { '_pickled_keymap': dict( (key, index) for key, (processor, obj, index) in self._keymap.iteritems() if isinstance(key, (basestring, int)) ), 'keys': self.keys } def __setstate__(self, state): # the row has been processed at pickling time so we don't need any # processor anymore self._processors = [None for _ in xrange(len(state['keys']))] self._keymap = keymap = {} for key, index in state['_pickled_keymap'].iteritems(): # not preserving "obj" here, unfortunately our # proxy comparison fails with the unpickle keymap[key] = (None, None, index) self.keys = state['keys'] self._echo = False class 
ResultProxy(object): """Wraps a DB-API cursor object to provide easier access to row columns. Individual columns may be accessed by their integer position, case-insensitive column name, or by ``schema.Column`` object. e.g.:: row = fetchone() col1 = row[0] # access via integer position col2 = row['col2'] # access via name col3 = row[mytable.c.mycol] # access via Column object. ``ResultProxy`` also handles post-processing of result column data using ``TypeEngine`` objects, which are referenced from the originating SQL statement that produced this result set. """ _process_row = RowProxy out_parameters = None _can_close_connection = False def __init__(self, context): self.context = context self.dialect = context.dialect self.closed = False self.cursor = self._saved_cursor = context.cursor self.connection = context.root_connection self._echo = self.connection._echo and \ context.engine._should_log_debug() self._init_metadata() def _init_metadata(self): metadata = self._cursor_description() if metadata is None: self._metadata = None else: self._metadata = ResultMetaData(self, metadata) def keys(self): """Return the current set of string keys for rows.""" if self._metadata: return self._metadata.keys else: return [] @util.memoized_property def rowcount(self): """Return the 'rowcount' for this result. The 'rowcount' reports the number of rows affected by an UPDATE or DELETE statement. It has *no* other uses and is not intended to provide the number of rows present from a SELECT. Note that this row count may not be properly implemented in some dialects; this is indicated by :meth:`~sqlalchemy.engine.base.ResultProxy.supports_sane_rowcount()` and :meth:`~sqlalchemy.engine.base.ResultProxy.supports_sane_multi_rowcount()`. ``rowcount()`` also may not work at this time for a statement that uses ``returning()``. 
""" try: return self.context.rowcount except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) raise @property def lastrowid(self): """return the 'lastrowid' accessor on the DBAPI cursor. This is a DBAPI specific method and is only functional for those backends which support it, for statements where it is appropriate. It's behavior is not consistent across backends. Usage of this method is normally unnecessary; the :attr:`~ResultProxy.inserted_primary_key` attribute provides a tuple of primary key values for a newly inserted row, regardless of database backend. """ try: return self._saved_cursor.lastrowid except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self._saved_cursor, self.context) raise @property def returns_rows(self): """True if this :class:`.ResultProxy` returns rows. I.e. if it is legal to call the methods :meth:`~.ResultProxy.fetchone`, :meth:`~.ResultProxy.fetchmany` :meth:`~.ResultProxy.fetchall`. """ return self._metadata is not None @property def is_insert(self): """True if this :class:`.ResultProxy` is the result of a executing an expression language compiled :func:`.expression.insert` construct. When True, this implies that the :attr:`inserted_primary_key` attribute is accessible, assuming the statement did not include a user defined "returning" construct. """ return self.context.isinsert def _cursor_description(self): """May be overridden by subclasses.""" return self._saved_cursor.description def close(self, _autoclose_connection=True): """Close this ResultProxy. Closes the underlying DBAPI cursor corresponding to the execution. Note that any data cached within this ResultProxy is still available. For some types of results, this may include buffered rows. If this ResultProxy was generated from an implicit execution, the underlying Connection will also be closed (returns the underlying DBAPI connection to the connection pool.) 
This method is called automatically when: * all result rows are exhausted using the fetchXXX() methods. * cursor.description is None. """ if not self.closed: self.closed = True self.connection._safe_close_cursor(self.cursor) if _autoclose_connection and \ self.connection.should_close_with_result: self.connection.close() # allow consistent errors self.cursor = None def __iter__(self): while True: row = self.fetchone() if row is None: raise StopIteration else: yield row @util.memoized_property def inserted_primary_key(self): """Return the primary key for the row just inserted. The return value is a list of scalar values corresponding to the list of primary key columns in the target table. This only applies to single row :func:`.insert` constructs which did not explicitly specify :meth:`.Insert.returning`. Note that primary key columns which specify a server_default clause, or otherwise do not qualify as "autoincrement" columns (see the notes at :class:`.Column`), and were generated using the database-side default, will appear in this list as ``None`` unless the backend supports "returning" and the insert statement executed with the "implicit returning" enabled. """ if not self.context.isinsert: raise exc.InvalidRequestError( "Statement is not an insert() expression construct.") elif self.context._is_explicit_returning: raise exc.InvalidRequestError( "Can't call inserted_primary_key when returning() " "is used.") return self.context.inserted_primary_key @util.deprecated("0.6", "Use :attr:`.ResultProxy.inserted_primary_key`") def last_inserted_ids(self): """Return the primary key for the row just inserted.""" return self.inserted_primary_key def last_updated_params(self): """Return the collection of updated parameters from this execution. """ if self.context.executemany: return self.context.compiled_parameters else: return self.context.compiled_parameters[0] def last_inserted_params(self): """Return the collection of inserted parameters from this execution. 
""" if self.context.executemany: return self.context.compiled_parameters else: return self.context.compiled_parameters[0] def lastrow_has_defaults(self): """Return ``lastrow_has_defaults()`` from the underlying ExecutionContext. See ExecutionContext for details. """ return self.context.lastrow_has_defaults() def postfetch_cols(self): """Return ``postfetch_cols()`` from the underlying ExecutionContext. See ExecutionContext for details. """ return self.context.postfetch_cols def prefetch_cols(self): return self.context.prefetch_cols def supports_sane_rowcount(self): """Return ``supports_sane_rowcount`` from the dialect.""" return self.dialect.supports_sane_rowcount def supports_sane_multi_rowcount(self): """Return ``supports_sane_multi_rowcount`` from the dialect.""" return self.dialect.supports_sane_multi_rowcount def _fetchone_impl(self): try: return self.cursor.fetchone() except AttributeError: self._non_result() def _fetchmany_impl(self, size=None): try: if size is None: return self.cursor.fetchmany() else: return self.cursor.fetchmany(size) except AttributeError: self._non_result() def _fetchall_impl(self): try: return self.cursor.fetchall() except AttributeError: self._non_result() def _non_result(self): if self._metadata is None: raise exc.ResourceClosedError( "This result object does not return rows. 
" "It has been closed automatically.", ) else: raise exc.ResourceClosedError("This result object is closed.") def process_rows(self, rows): process_row = self._process_row metadata = self._metadata keymap = metadata._keymap processors = metadata._processors if self._echo: log = self.context.engine.logger.debug l = [] for row in rows: log("Row %r", row) l.append(process_row(metadata, row, processors, keymap)) return l else: return [process_row(metadata, row, processors, keymap) for row in rows] def fetchall(self): """Fetch all rows, just like DB-API ``cursor.fetchall()``.""" try: l = self.process_rows(self._fetchall_impl()) self.close() return l except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) raise def fetchmany(self, size=None): """Fetch many rows, just like DB-API ``cursor.fetchmany(size=cursor.arraysize)``. If rows are present, the cursor remains open after this is called. Else the cursor is automatically closed and an empty list is returned. """ try: l = self.process_rows(self._fetchmany_impl(size)) if len(l) == 0: self.close() return l except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) raise def fetchone(self): """Fetch one row, just like DB-API ``cursor.fetchone()``. If a row is present, the cursor remains open after this is called. Else the cursor is automatically closed and None is returned. """ try: row = self._fetchone_impl() if row is not None: return self.process_rows([row])[0] else: self.close() return None except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) raise def first(self): """Fetch the first row and then close the result set unconditionally. Returns None if no row is present. 
""" if self._metadata is None: self._non_result() try: row = self._fetchone_impl() except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) raise try: if row is not None: return self.process_rows([row])[0] else: return None finally: self.close() def scalar(self): """Fetch the first column of the first row, and close the result set. Returns None if no row is present. """ row = self.first() if row is not None: return row[0] else: return None class BufferedRowResultProxy(ResultProxy): """A ResultProxy with row buffering behavior. ``ResultProxy`` that buffers the contents of a selection of rows before ``fetchone()`` is called. This is to allow the results of ``cursor.description`` to be available immediately, when interfacing with a DB-API that requires rows to be consumed before this information is available (currently psycopg2, when used with server-side cursors). The pre-fetching behavior fetches only one row initially, and then grows its buffer size by a fixed amount with each successive need for additional rows up to a size of 100. """ def _init_metadata(self): self.__buffer_rows() super(BufferedRowResultProxy, self)._init_metadata() # this is a "growth chart" for the buffering of rows. 
# each successive __buffer_rows call will use the next # value in the list for the buffer size until the max # is reached size_growth = { 1 : 5, 5 : 10, 10 : 20, 20 : 50, 50 : 100, 100 : 250, 250 : 500, 500 : 1000 } def __buffer_rows(self): size = getattr(self, '_bufsize', 1) self.__rowbuffer = collections.deque(self.cursor.fetchmany(size)) self._bufsize = self.size_growth.get(size, size) def _fetchone_impl(self): if self.closed: return None if not self.__rowbuffer: self.__buffer_rows() if not self.__rowbuffer: return None return self.__rowbuffer.popleft() def _fetchmany_impl(self, size=None): if size is None: return self._fetchall_impl() result = [] for x in range(0, size): row = self._fetchone_impl() if row is None: break result.append(row) return result def _fetchall_impl(self): self.__rowbuffer.extend(self.cursor.fetchall()) ret = self.__rowbuffer self.__rowbuffer = collections.deque() return ret class FullyBufferedResultProxy(ResultProxy): """A result proxy that buffers rows fully upon creation. Used for operations where a result is to be delivered after the database conversation can not be continued, such as MSSQL INSERT...OUTPUT after an autocommit. 
""" def _init_metadata(self): super(FullyBufferedResultProxy, self)._init_metadata() self.__rowbuffer = self._buffer_rows() def _buffer_rows(self): return collections.deque(self.cursor.fetchall()) def _fetchone_impl(self): if self.__rowbuffer: return self.__rowbuffer.popleft() else: return None def _fetchmany_impl(self, size=None): if size is None: return self._fetchall_impl() result = [] for x in range(0, size): row = self._fetchone_impl() if row is None: break result.append(row) return result def _fetchall_impl(self): ret = self.__rowbuffer self.__rowbuffer = collections.deque() return ret class BufferedColumnRow(RowProxy): def __init__(self, parent, row, processors, keymap): # preprocess row row = list(row) # this is a tad faster than using enumerate index = 0 for processor in parent._orig_processors: if processor is not None: row[index] = processor(row[index]) index += 1 row = tuple(row) super(BufferedColumnRow, self).__init__(parent, row, processors, keymap) class BufferedColumnResultProxy(ResultProxy): """A ResultProxy with column buffering behavior. ``ResultProxy`` that loads all columns into memory each time fetchone() is called. If fetchmany() or fetchall() are called, the full grid of results is fetched. This is to operate with databases where result rows contain "live" results that fall out of scope unless explicitly fetched. Currently this includes cx_Oracle LOB objects. """ _process_row = BufferedColumnRow def _init_metadata(self): super(BufferedColumnResultProxy, self)._init_metadata() metadata = self._metadata # orig_processors will be used to preprocess each row when they are # constructed. metadata._orig_processors = metadata._processors # replace the all type processors by None processors. 
metadata._processors = [None for _ in xrange(len(metadata.keys))] keymap = {} for k, (func, obj, index) in metadata._keymap.iteritems(): keymap[k] = (None, obj, index) self._metadata._keymap = keymap def fetchall(self): # can't call cursor.fetchall(), since rows must be # fully processed before requesting more from the DBAPI. l = [] while True: row = self.fetchone() if row is None: break l.append(row) return l def fetchmany(self, size=None): # can't call cursor.fetchmany(), since rows must be # fully processed before requesting more from the DBAPI. if size is None: return self.fetchall() l = [] for i in xrange(size): row = self.fetchone() if row is None: break l.append(row) return l def connection_memoize(key): """Decorator, memoize a function in a connection.info stash. Only applicable to functions which take no arguments other than a connection. The memo will be stored in ``connection.info[key]``. """ @util.decorator def decorated(fn, self, connection): connection = connection.connect() try: return connection.info[key] except KeyError: connection.info[key] = val = fn(self, connection) return val return decorated
gpl-3.0
ChanderG/numpy
numpy/polynomial/tests/test_polyutils.py
202
3094
"""Tests for polyutils module.

"""
from __future__ import division, absolute_import, print_function

import numpy as np
import numpy.polynomial.polyutils as pu
from numpy.testing import (
    assert_almost_equal, assert_raises, assert_equal, assert_)
try:
    # ``TestCase`` and ``run_module_suite`` were removed from numpy.testing
    # in recent numpy releases; fall back to the stdlib equivalents so the
    # module keeps working on both old and new numpy.
    from numpy.testing import TestCase, run_module_suite
except ImportError:
    from unittest import TestCase, main as run_module_suite


class TestMisc(TestCase):
    """Tests for the sequence/coefficient trimming helpers."""

    def test_trimseq(self):
        # Trailing zeros of any length must be trimmed away.
        # (Previously the tail was the fixed ``[0]*5``, leaving the loop
        # variable unused so only a single input length was exercised.)
        for i in range(5):
            tgt = [1]
            res = pu.trimseq([1] + [0]*i)
            assert_equal(res, tgt)

    def test_as_series(self):
        # check exceptions
        assert_raises(ValueError, pu.as_series, [[]])
        assert_raises(ValueError, pu.as_series, [[[1, 2]]])
        assert_raises(ValueError, pu.as_series, [[1], ['a']])
        # check common types: all results share the widest input dtype
        types = ['i', 'd', 'O']
        for i in range(len(types)):
            for j in range(i):
                ci = np.ones(1, types[i])
                cj = np.ones(1, types[j])
                [resi, resj] = pu.as_series([ci, cj])
                assert_(resi.dtype.char == resj.dtype.char)
                assert_(resj.dtype.char == types[i])

    def test_trimcoef(self):
        coef = [2, -1, 1, 0]
        # Test exceptions
        assert_raises(ValueError, pu.trimcoef, coef, -1)
        # Test results
        assert_equal(pu.trimcoef(coef), coef[:-1])
        assert_equal(pu.trimcoef(coef, 1), coef[:-3])
        assert_equal(pu.trimcoef(coef, 2), [0])


class TestDomain(TestCase):
    """Tests for the domain inspection/mapping helpers."""

    def test_getdomain(self):
        # test for real values
        x = [1, 10, 3, -1]
        tgt = [-1, 10]
        res = pu.getdomain(x)
        assert_almost_equal(res, tgt)

        # test for complex values
        x = [1 + 1j, 1 - 1j, 0, 2]
        tgt = [-1j, 2 + 1j]
        res = pu.getdomain(x)
        assert_almost_equal(res, tgt)

    def test_mapdomain(self):
        # test for real values: mapping a domain onto itself yields the target
        dom1 = [0, 4]
        dom2 = [1, 3]
        tgt = dom2
        res = pu.mapdomain(dom1, dom1, dom2)
        assert_almost_equal(res, tgt)

        # test for complex values
        dom1 = [0 - 1j, 2 + 1j]
        dom2 = [-2, 2]
        tgt = dom2
        x = dom1
        res = pu.mapdomain(x, dom1, dom2)
        assert_almost_equal(res, tgt)

        # test for multidimensional arrays
        dom1 = [0, 4]
        dom2 = [1, 3]
        tgt = np.array([dom2, dom2])
        x = np.array([dom1, dom1])
        res = pu.mapdomain(x, dom1, dom2)
        assert_almost_equal(res, tgt)

        # test that subtypes are preserved.
        dom1 = [0, 4]
        dom2 = [1, 3]
        x = np.matrix([dom1, dom1])
        res = pu.mapdomain(x, dom1, dom2)
        assert_(isinstance(res, np.matrix))

    def test_mapparms(self):
        # test for real values
        dom1 = [0, 4]
        dom2 = [1, 3]
        tgt = [1, .5]
        res = pu.mapparms(dom1, dom2)
        assert_almost_equal(res, tgt)

        # test for complex values
        dom1 = [0 - 1j, 2 + 1j]
        dom2 = [-2, 2]
        tgt = [-1 + 1j, 1 - 1j]
        res = pu.mapparms(dom1, dom2)
        assert_almost_equal(res, tgt)


if __name__ == "__main__":
    run_module_suite()
bsd-3-clause
SanPen/PracticalGridModeling
examples/substation.py
1
1669
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Build and plot a substation connectivity graph from ``substation.xlsx``.

Created on Mon Nov 20 16:40:22 2017

@author: santi
"""

import pandas as pd
import numpy as np
import networkx as nx
from matplotlib import pyplot as plt


def load_data(fname='substation.xlsx'):
    """Read the workbook sheets describing the substation.

    :param fname: path of the Excel workbook.
    :return: tuple ``(conn_df, stat_df, pos_df)`` with the connectivity
        matrix (missing entries treated as "not connected"), the branch
        states and the node plotting positions.
    """
    conn_df = pd.read_excel(fname, 'Connectivity', index_col=0).fillna(0)
    stat_df = pd.read_excel(fname, 'States', index_col=0)
    pos_df = pd.read_excel(fname, 'Pos', index_col=0)
    return conn_df, stat_df, pos_df


def build_graph(conn_df, stat_df, pos_df):
    """Build the substation graph from the workbook data.

    :return: tuple ``(graph, pos, lpos, node_names)`` where ``pos`` maps a
        node name to its plot coordinates and ``lpos`` to its label
        coordinates.
    """
    node_names = conn_df.columns.values

    graph = nx.Graph()
    pos = dict()
    lpos = dict()

    # add nodes to the graph, remembering their plot/label positions
    for i, name in enumerate(node_names):
        graph.add_node(name)
        x = pos_df.values[i, 0]
        y = pos_df.values[i, 1]
        pos[name] = [x, y]
        lpos[name] = [x, y]

    # add branches to the graph: only branches whose state is > 0 (closed)
    for i, row in enumerate(conn_df.values):
        if stat_df.values[i] > 0:
            # works because there are only 2 values per line with a 1
            # in the excel file
            x, y = np.where(row > 0)[0]
            graph.add_edge(node_names[x], node_names[y])

    return graph, pos, lpos, node_names


def split_islands(graph):
    """Return the electrically isolated islands as a list of sub-graphs."""
    islands = list(nx.connected_components(graph))
    print('Islands:\n', islands, '\n\n')
    return [nx.subgraph(graph, island) for island in islands]


def plot_graph(graph, pos, lpos, node_names):
    """Draw the graph with node labels slightly offset from the nodes."""
    nx.draw(graph, pos=pos, node_size=100, node_color='black')
    for name in node_names:
        x, y = lpos[name]
        plt.text(x + 1.5, y + 1, s=name,
                 bbox=dict(facecolor='white', alpha=0.5),
                 horizontalalignment='center')
    plt.show()


if __name__ == "__main__":
    conn_df, stat_df, pos_df = load_data()
    graph, pos, lpos, node_names = build_graph(conn_df, stat_df, pos_df)
    sub_grids = split_islands(graph)
    plot_graph(graph, pos, lpos, node_names)
gpl-3.0
sergiorua/libcloud
libcloud/dns/drivers/zerigo.py
31
18314
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ 'ZerigoDNSDriver' ] import copy import base64 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b try: from lxml import etree as ET except ImportError: from xml.etree import ElementTree as ET from libcloud.utils.misc import merge_valid_keys, get_new_obj from libcloud.utils.xml import findtext, findall from libcloud.common.base import XmlResponse, ConnectionUserAndKey from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.common.types import MalformedResponseError from libcloud.dns.types import Provider, RecordType from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError from libcloud.dns.base import DNSDriver, Zone, Record API_HOST = 'ns.zerigo.com' API_VERSION = '1.1' API_ROOT = '/api/%s/' % (API_VERSION) VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers'] VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority'] # Number of items per page (maximum limit is 1000) ITEMS_PER_PAGE = 100 class ZerigoError(LibcloudError): def __init__(self, code, errors): self.code = code self.errors = errors or [] def __str__(self): return 'Errors: %s' % (', '.join(self.errors)) def __repr__(self): return ('<ZerigoError 
response code=%s, errors count=%s>' % ( self.code, len(self.errors))) class ZerigoDNSResponse(XmlResponse): def success(self): return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED] def parse_error(self): status = int(self.status) if status == 401: if not self.body: raise InvalidCredsError(str(self.status) + ': ' + self.error) else: raise InvalidCredsError(self.body) elif status == 404: context = self.connection.context if context['resource'] == 'zone': raise ZoneDoesNotExistError(value='', driver=self, zone_id=context['id']) elif context['resource'] == 'record': raise RecordDoesNotExistError(value='', driver=self, record_id=context['id']) elif status != 503: try: body = ET.XML(self.body) except: raise MalformedResponseError('Failed to parse XML', body=self.body) errors = [] for error in findall(element=body, xpath='error'): errors.append(error.text) raise ZerigoError(code=status, errors=errors) return self.body class ZerigoDNSConnection(ConnectionUserAndKey): host = API_HOST secure = True responseCls = ZerigoDNSResponse def add_default_headers(self, headers): auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8')) return headers def request(self, action, params=None, data='', headers=None, method='GET'): if not headers: headers = {} if not params: params = {} if method in ("POST", "PUT"): headers = {'Content-Type': 'application/xml; charset=UTF-8'} return super(ZerigoDNSConnection, self).request(action=action, params=params, data=data, method=method, headers=headers) class ZerigoDNSDriver(DNSDriver): type = Provider.ZERIGO name = 'Zerigo DNS' website = 'http://www.zerigo.com/' connectionCls = ZerigoDNSConnection RECORD_TYPE_MAP = { RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.GEO: 'GEO', RecordType.MX: 'MX', RecordType.NAPTR: 'NAPTR', RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.REDIRECT: 'REDIRECT', RecordType.SPF: 'SPF', 
RecordType.SRV: 'SRV', RecordType.TXT: 'TXT', RecordType.URL: 'URL', } def iterate_zones(self): return self._get_more('zones') def iterate_records(self, zone): return self._get_more('records', zone=zone) def get_zone(self, zone_id): path = API_ROOT + 'zones/%s.xml' % (zone_id) self.connection.set_context({'resource': 'zone', 'id': zone_id}) data = self.connection.request(path).object zone = self._to_zone(elem=data) return zone def get_record(self, zone_id, record_id): zone = self.get_zone(zone_id=zone_id) self.connection.set_context({'resource': 'record', 'id': record_id}) path = API_ROOT + 'hosts/%s.xml' % (record_id) data = self.connection.request(path).object record = self._to_record(elem=data, zone=zone) return record def create_zone(self, domain, type='master', ttl=None, extra=None): """ Create a new zone. Provider API docs: https://www.zerigo.com/docs/apis/dns/1.1/zones/create @inherits: :class:`DNSDriver.create_zone` """ path = API_ROOT + 'zones.xml' zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl, extra=extra) data = self.connection.request(action=path, data=ET.tostring(zone_elem), method='POST').object zone = self._to_zone(elem=data) return zone def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): """ Update an existing zone. 
Provider API docs: https://www.zerigo.com/docs/apis/dns/1.1/zones/update @inherits: :class:`DNSDriver.update_zone` """ if domain: raise LibcloudError('Domain cannot be changed', driver=self) path = API_ROOT + 'zones/%s.xml' % (zone.id) zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl, extra=extra) response = self.connection.request(action=path, data=ET.tostring(zone_elem), method='PUT') assert response.status == httplib.OK merged = merge_valid_keys(params=copy.deepcopy(zone.extra), valid_keys=VALID_ZONE_EXTRA_PARAMS, extra=extra) updated_zone = get_new_obj(obj=zone, klass=Zone, attributes={'type': type, 'ttl': ttl, 'extra': merged}) return updated_zone def create_record(self, name, zone, type, data, extra=None): """ Create a new record. Provider API docs: https://www.zerigo.com/docs/apis/dns/1.1/hosts/create @inherits: :class:`DNSDriver.create_record` """ path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id) record_elem = self._to_record_elem(name=name, type=type, data=data, extra=extra) response = self.connection.request(action=path, data=ET.tostring(record_elem), method='POST') assert response.status == httplib.CREATED record = self._to_record(elem=response.object, zone=zone) return record def update_record(self, record, name=None, type=None, data=None, extra=None): path = API_ROOT + 'hosts/%s.xml' % (record.id) record_elem = self._to_record_elem(name=name, type=type, data=data, extra=extra) response = self.connection.request(action=path, data=ET.tostring(record_elem), method='PUT') assert response.status == httplib.OK merged = merge_valid_keys(params=copy.deepcopy(record.extra), valid_keys=VALID_RECORD_EXTRA_PARAMS, extra=extra) updated_record = get_new_obj(obj=record, klass=Record, attributes={'type': type, 'data': data, 'extra': merged}) return updated_record def delete_zone(self, zone): path = API_ROOT + 'zones/%s.xml' % (zone.id) self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.request(action=path, 
method='DELETE') return response.status == httplib.OK def delete_record(self, record): path = API_ROOT + 'hosts/%s.xml' % (record.id) self.connection.set_context({'resource': 'record', 'id': record.id}) response = self.connection.request(action=path, method='DELETE') return response.status == httplib.OK def ex_get_zone_by_domain(self, domain): """ Retrieve a zone object by the domain name. :param domain: The domain which should be used :type domain: ``str`` :rtype: :class:`Zone` """ path = API_ROOT + 'zones/%s.xml' % (domain) self.connection.set_context({'resource': 'zone', 'id': domain}) data = self.connection.request(path).object zone = self._to_zone(elem=data) return zone def ex_force_slave_axfr(self, zone): """ Force a zone transfer. :param zone: Zone which should be used. :type zone: :class:`Zone` :rtype: :class:`Zone` """ path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id) self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.request(path, method='POST') assert response.status == httplib.ACCEPTED return zone def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None): zone_elem = ET.Element('zone', {}) if domain: domain_elem = ET.SubElement(zone_elem, 'domain') domain_elem.text = domain if type: ns_type_elem = ET.SubElement(zone_elem, 'ns-type') if type == 'master': ns_type_elem.text = 'pri_sec' elif type == 'slave': if not extra or 'ns1' not in extra: raise LibcloudError('ns1 extra attribute is required ' + 'when zone type is slave', driver=self) ns_type_elem.text = 'sec' ns1_elem = ET.SubElement(zone_elem, 'ns1') ns1_elem.text = extra['ns1'] elif type == 'std_master': # TODO: Each driver should provide supported zone types # Slave name servers are elsewhere if not extra or 'slave-nameservers' not in extra: raise LibcloudError('slave-nameservers extra ' + 'attribute is required whenzone ' + 'type is std_master', driver=self) ns_type_elem.text = 'pri' slave_nameservers_elem = ET.SubElement(zone_elem, 
'slave-nameservers') slave_nameservers_elem.text = extra['slave-nameservers'] if ttl: default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl') default_ttl_elem.text = str(ttl) if extra and 'tag-list' in extra: tags = extra['tag-list'] tags_elem = ET.SubElement(zone_elem, 'tag-list') tags_elem.text = ' '.join(tags) return zone_elem def _to_record_elem(self, name=None, type=None, data=None, extra=None): record_elem = ET.Element('host', {}) if name: name_elem = ET.SubElement(record_elem, 'hostname') name_elem.text = name if type is not None: type_elem = ET.SubElement(record_elem, 'host-type') type_elem.text = self.RECORD_TYPE_MAP[type] if data: data_elem = ET.SubElement(record_elem, 'data') data_elem.text = data if extra: if 'ttl' in extra: ttl_elem = ET.SubElement(record_elem, 'ttl', {'type': 'integer'}) ttl_elem.text = str(extra['ttl']) if 'priority' in extra: # Only MX and SRV records support priority priority_elem = ET.SubElement(record_elem, 'priority', {'type': 'integer'}) priority_elem.text = str(extra['priority']) if 'notes' in extra: notes_elem = ET.SubElement(record_elem, 'notes') notes_elem.text = extra['notes'] return record_elem def _to_zones(self, elem): zones = [] for item in findall(element=elem, xpath='zone'): zone = self._to_zone(elem=item) zones.append(zone) return zones def _to_zone(self, elem): id = findtext(element=elem, xpath='id') domain = findtext(element=elem, xpath='domain') type = findtext(element=elem, xpath='ns-type') type = 'master' if type.find('pri') == 0 else 'slave' ttl = findtext(element=elem, xpath='default-ttl') hostmaster = findtext(element=elem, xpath='hostmaster') custom_ns = findtext(element=elem, xpath='custom-ns') custom_nameservers = findtext(element=elem, xpath='custom-nameservers') notes = findtext(element=elem, xpath='notes') nx_ttl = findtext(element=elem, xpath='nx-ttl') slave_nameservers = findtext(element=elem, xpath='slave-nameservers') tags = findtext(element=elem, xpath='tag-list') tags = tags.split(' ') if tags 
else [] extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns, 'custom-nameservers': custom_nameservers, 'notes': notes, 'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers, 'tags': tags} zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl), driver=self, extra=extra) return zone def _to_records(self, elem, zone): records = [] for item in findall(element=elem, xpath='host'): record = self._to_record(elem=item, zone=zone) records.append(record) return records def _to_record(self, elem, zone): id = findtext(element=elem, xpath='id') name = findtext(element=elem, xpath='hostname') type = findtext(element=elem, xpath='host-type') type = self._string_to_record_type(type) data = findtext(element=elem, xpath='data') notes = findtext(element=elem, xpath='notes', no_text_value=None) state = findtext(element=elem, xpath='state', no_text_value=None) fqdn = findtext(element=elem, xpath='fqdn', no_text_value=None) priority = findtext(element=elem, xpath='priority', no_text_value=None) ttl = findtext(element=elem, xpath='ttl', no_text_value=None) if not name: name = None if ttl: ttl = int(ttl) extra = {'notes': notes, 'state': state, 'fqdn': fqdn, 'priority': priority, 'ttl': ttl} record = Record(id=id, name=name, type=type, data=data, zone=zone, driver=self, extra=extra) return record def _get_more(self, rtype, **kwargs): exhausted = False last_key = None while not exhausted: items, last_key, exhausted = self._get_data(rtype, last_key, **kwargs) for item in items: yield item def _get_data(self, rtype, last_key, **kwargs): # Note: last_key in this case really is a "last_page". # TODO: Update base driver and change last_key to something more # generic - e.g. 
marker params = {} params['per_page'] = ITEMS_PER_PAGE params['page'] = last_key + 1 if last_key else 1 if rtype == 'zones': path = API_ROOT + 'zones.xml' response = self.connection.request(path) transform_func = self._to_zones elif rtype == 'records': zone = kwargs['zone'] path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id) self.connection.set_context({'resource': 'zone', 'id': zone.id}) response = self.connection.request(path, params=params) transform_func = self._to_records exhausted = False result_count = int(response.headers.get('x-query-count', 0)) if (params['page'] * ITEMS_PER_PAGE) >= result_count: exhausted = True if response.status == httplib.OK: items = transform_func(elem=response.object, **kwargs) return items, params['page'], exhausted else: return [], None, True
apache-2.0
FlaPer87/django-nonrel
tests/regressiontests/forms/localflavor/au.py
25
3528
# -*- coding: utf-8 -*- # Tests for the contrib/localflavor/ AU form fields. tests = r""" ## AUPostCodeField ########################################################## A field that accepts a four digit Australian post code. >>> from django.contrib.localflavor.au.forms import AUPostCodeField >>> f = AUPostCodeField() >>> f.clean('1234') u'1234' >>> f.clean('2000') u'2000' >>> f.clean('abcd') Traceback (most recent call last): ... ValidationError: [u'Enter a 4 digit post code.'] >>> f.clean('20001') Traceback (most recent call last): ... ValidationError: [u'Enter a 4 digit post code.'] >>> f.clean(None) Traceback (most recent call last): ... ValidationError: [u'This field is required.'] >>> f.clean('') Traceback (most recent call last): ... ValidationError: [u'This field is required.'] >>> f = AUPostCodeField(required=False) >>> f.clean('1234') u'1234' >>> f.clean('2000') u'2000' >>> f.clean('abcd') Traceback (most recent call last): ... ValidationError: [u'Enter a 4 digit post code.'] >>> f.clean('20001') Traceback (most recent call last): ... ValidationError: [u'Enter a 4 digit post code.'] >>> f.clean(None) u'' >>> f.clean('') u'' ## AUPhoneNumberField ######################################################## A field that accepts a 10 digit Australian phone number. llows spaces and parentheses around area code. >>> from django.contrib.localflavor.au.forms import AUPhoneNumberField >>> f = AUPhoneNumberField() >>> f.clean('1234567890') u'1234567890' >>> f.clean('0213456789') u'0213456789' >>> f.clean('02 13 45 67 89') u'0213456789' >>> f.clean('(02) 1345 6789') u'0213456789' >>> f.clean('(02) 1345-6789') u'0213456789' >>> f.clean('(02)1345-6789') u'0213456789' >>> f.clean('0408 123 456') u'0408123456' >>> f.clean('123') Traceback (most recent call last): ... ValidationError: [u'Phone numbers must contain 10 digits.'] >>> f.clean('1800DJANGO') Traceback (most recent call last): ... 
ValidationError: [u'Phone numbers must contain 10 digits.'] >>> f.clean(None) Traceback (most recent call last): ... ValidationError: [u'This field is required.'] >>> f.clean('') Traceback (most recent call last): ... ValidationError: [u'This field is required.'] >>> f = AUPhoneNumberField(required=False) >>> f.clean('1234567890') u'1234567890' >>> f.clean('0213456789') u'0213456789' >>> f.clean('02 13 45 67 89') u'0213456789' >>> f.clean('(02) 1345 6789') u'0213456789' >>> f.clean('(02) 1345-6789') u'0213456789' >>> f.clean('(02)1345-6789') u'0213456789' >>> f.clean('0408 123 456') u'0408123456' >>> f.clean('123') Traceback (most recent call last): ... ValidationError: [u'Phone numbers must contain 10 digits.'] >>> f.clean('1800DJANGO') Traceback (most recent call last): ... ValidationError: [u'Phone numbers must contain 10 digits.'] >>> f.clean(None) u'' >>> f.clean('') u'' ## AUStateSelect ############################################################# AUStateSelect is a Select widget that uses a list of Australian states/territories as its choices. >>> from django.contrib.localflavor.au.forms import AUStateSelect >>> f = AUStateSelect() >>> print f.render('state', 'NSW') <select name="state"> <option value="ACT">Australian Capital Territory</option> <option value="NSW" selected="selected">New South Wales</option> <option value="NT">Northern Territory</option> <option value="QLD">Queensland</option> <option value="SA">South Australia</option> <option value="TAS">Tasmania</option> <option value="VIC">Victoria</option> <option value="WA">Western Australia</option> </select> """
bsd-3-clause
madeleinel/WoMentor
twitter/tweetparse.py
1
2950
import re
from acceptedlangs import accepted_langs_normal, accepted_langs_lower


def tweetParse(tweetString):
    """Parse a WoMentor signup tweet into its components.

    Expected tweet shape (three '-'-prefixed sections), e.g.:
        #WomenToTech #Mentor -langs: javascript, python -skills: node.js -offering: help getting started

    Returns a 4-tuple: (role, languages, skills, offers) where role is
    'mentor' or 'mentee'.

    Raises ValueError if the tweet does not contain at least three '-'
    separators (previously this surfaced as a bare IndexError).
    """
    # Role: any '#mentor' hashtag (case-insensitive) marks a mentor.
    isMentorBool = '#mentor' in tweetString.lower()
    isMentor = 'mentor' if isMentorBool else 'mentee'

    # The three sections are delimited by the first three '-' characters.
    indices = [i for i, x in enumerate(tweetString) if x == "-"]
    if len(indices) < 3:
        raise ValueError("tweet must contain three '-'-prefixed sections")
    index_1, index_2, index_3 = indices[0], indices[1], indices[2]

    # Slice into the three sections and tokenize each one.
    # Languages are whitespace-separated single words; skills and offers
    # are comma-separated and may contain spaces.
    langs_raw = toMultiStrings(tweetString[index_1:index_2])
    skills_raw = toMultiStrings(tweetString[index_2:index_3], isOffer=True)
    offers_raw = toMultiStrings(tweetString[index_3:], isOffer=True)

    langs, skills = checkTheLists(langs_raw, skills_raw)
    offers = checkOffers(offers_raw)
    return isMentor, langs, skills, offers


def toMultiStrings(string, isOffer=False):
    """Convert one '-label: a, b, c' section into a list of tokens.

    The leading '-label:' prefix is removed first.  Offer/skill sections
    (isOffer=True) are split on commas so tokens may contain spaces
    (e.g. 'career advice'); language sections are split on whitespace
    with commas discarded.
    """
    # Drop the '-langs:' / '-skills:' / '-offering:' prefix.
    string = re.sub('-(.*?):', '', string)
    if isOffer:
        # Comma-separated, possibly multi-word tokens.
        # (The original's `type(string) is str` check after split() was
        # dead code: split() always yields a list.)
        return stripString(string.split(','))
    # Whitespace-separated single-word tokens; commas are noise.
    string = re.sub(',', '', string)
    return string.strip().split()


def stripString(string):
    """Strip surrounding whitespace from every element of the list, in place."""
    for indx, strng in enumerate(string):
        string[indx] = strng.strip()
    return string


def checkTheLists(langstore, skillstore):
    """Separate recognised languages from skills and cap list lengths.

    Tokens from the language section that are not recognised languages
    are reclassified as skills.  Lists longer than four entries are cut
    to three plus an 'and more...' marker.  Note: skillstore is mutated
    in place when skills are merged into it.
    """
    langs = []
    skills = []
    for language in langstore:
        if language.lower() in accepted_langs_lower:
            langs.append(language)
        elif 'SQL' in language:
            # Any SQL dialect (MySQL, PostgreSQL, ...) counts as a language.
            langs.append(language)
        else:
            skills.append(language)
    if len(langs) > 4:
        langs = langs[:3]
        langs.append('and more...')
    if len(skillstore) <= 4:
        # Merge misclassified "languages" into the skills list.
        skillstore.extend(skills)
    if len(skillstore) > 4:
        skillstore = skillstore[:3]
        skillstore.append('and more...')
    return langs, skillstore


def checkOffers(offerstore):
    """Normalise free-text offer/seek tokens to canonical offer names."""
    offers = []
    for offer in offerstore:
        if 'start' in offer:
            offers.append('getting started')
        elif 'career' in offer:
            offers.append('career advice')
        elif 'project' in offer:
            offers.append('project help')
    return offers

# example tweets:
# #WomenToTech #Mentor -langs: javascript, python, haskell -skill: node.js, d3.js, jinja2 -offering: help getting started
# #WomenToTech #Mentee -langs: ruby, -skills: french, baking, SVG -seeking: help getting started
# if they have more than 3 languages change the third to 'and more' in the db
# if it contains SQL just include it as is
# we need to link to the tweet thread on the mentor list
mit
catsop/CATMAID
django/applications/djsopnet/control/segment.py
1
25749
import json from collections import namedtuple import pysopnet from django.core.exceptions import ValidationError from django.db import connection from django.http import Http404, HttpResponse from django.shortcuts import get_object_or_404 from catmaid.control.authentication import requires_user_role from catmaid.models import UserRole from djsopnet.control.common import safe_split, hash_to_id, id_to_hash from djsopnet.control.slice import slice_dict, _retrieve_slices_by_ids, \ _slicecursor_to_namedtuple, _slice_select_query from djsopnet.models import SegmentationStack def segment_dict(segment, with_solution=False): sd = {'hash': id_to_hash(segment.id), 'section': segment.section_sup, 'box': [segment.min_x, segment.min_y, segment.max_x, segment.max_y], 'type': segment.type, 'cost': segment.cost} if with_solution: sd['in_solution'] = segment.in_solution return sd def generate_segment_response(segment): if segment: return HttpResponse(json.dumps(segment_dict(segment)), content_type='application/json') else: return HttpResponse(json.dumps({'id': None}), content_type='application/json') def generate_segments_response(segments, with_solutions=False): segment_list = [segment_dict(segment, with_solutions) for segment in segments] return HttpResponse(json.dumps({'ok': True, 'segments': segment_list}), content_type='application/json') def generate_features_response(features): features_dicts = [] for feature in features: segment_hash = id_to_hash(feature.segment_id) feature_values = feature.features features_dicts.append({'hash': segment_hash, 'fv': feature_values}) return HttpResponse(json.dumps({'ok': True, 'features': features_dicts}), content_type='application/json') # --- Segments --- def setup_feature_names(names, stack, project): try: FeatureInfo.objects.get(stack=stack) return False except FeatureInfo.DoesNotExist: ids = [] for name in names: feature_name = FeatureName(name=name) feature_name.save() ids.append(feature_name.id) info = FeatureInfo(stack=stack, 
name_ids=ids, size=len(ids)) info.save() return True def get_feature_names(stack, project): # get feature names, if they exist. # throws FeatureNameInfo.DoesNotExist, and possibly FeatureNameInfo.MultipleObjectsReturned feature_info = FeatureInfo.objects.get(stack=stack) keys = feature_info.name_ids feature_name_objects = FeatureName.objects.filter(id__in=keys) feature_names = [] # ensure that the order of the feature_names list corresponds to that of keys for id in keys: for fno in feature_name_objects: if fno.id == id: feature_names.append(fno.name) return feature_names def _segmentcursor_to_namedtuple(cursor): """Create a namedtuple list stubbing for Segment objects from a cursor. Assumes the cursor has been executed and has at least the following columns: in_solution_core_ids. """ cols = [col[0] for col in cursor.description] SegmentTuple = namedtuple('SegmentTuple', cols) def segmentrow_to_namedtuple(row): rowdict = dict(zip(cols, row)) # In PostgreSQL 9.4 it will be possible to preserve column names in JSON # aggregated ROW columns without subqueries or CTEs. For now manually # map from default field names to original column names. rowdict.update({ 'in_solution': dict([ (solution['f1'], solution['f2']) for solution in rowdict['in_solution'] ]) }) if not any(rowdict['in_solution'].keys()): rowdict['in_solution'] = False return SegmentTuple(**rowdict) return [segmentrow_to_namedtuple(row) for row in cursor.fetchall()] def _segment_select_query(segmentation_stack_id, segment_id_query): """Build a querystring to select segments given an ID query. 
Keyword arguments: segment_id_query -- A string SELECT statement returning a segment_id column """ return ''' SELECT s.id, s.section_sup, s.min_x, s.min_y, s.max_x, s.max_y, s.type, s.cost, ARRAY_TO_JSON(ARRAY_AGG(DISTINCT ROW(ssol.solution_id, ssol.assembly_id))) AS in_solution FROM segstack_%(segstack_id)s.segment s JOIN (%(segment_id_query)s) AS segment_id_query ON (segment_id_query.segment_id = s.id) LEFT JOIN (SELECT aseg.segment_id, sola.solution_id, sola.assembly_id, sp.core_id FROM segstack_%(segstack_id)s.assembly_segment aseg JOIN segstack_%(segstack_id)s.solution_assembly sola ON sola.assembly_id = aseg.assembly_id JOIN segstack_%(segstack_id)s.solution_precedence sp ON sp.solution_id = sola.solution_id) AS ssol ON (ssol.segment_id = s.id) GROUP BY s.id ''' % {'segstack_id': segmentation_stack_id, 'segment_id_query': segment_id_query} def _retrieve_segments_by_ids(segmentation_stack_id, segment_ids): segments = [] if segment_ids: cursor = connection.cursor() cursor.execute(_segment_select_query( segmentation_stack_id, ''' SELECT * FROM (VALUES (%s)) AS t (segment_id) ''' % '),('.join(map(str, segment_ids)))) segments = _segmentcursor_to_namedtuple(cursor) return segments @requires_user_role([UserRole.Annotate, UserRole.Browse]) def retrieve_segment_and_conflicts(request, project_id, segmentation_stack_id): """ Retrieve a segment (or set of co-section conflicting segments), its slices, their first-order conflict slices, and segments involving these slices in the same section. 
""" segstack = get_object_or_404(SegmentationStack, pk=segmentation_stack_id) segment_id = ','.join([str(hash_to_id(x)) for x in safe_split(request.POST.get('hash'), 'segment hashes')]) cursor = connection.cursor() cursor.execute((''' WITH req_seg_slices AS ( SELECT slice_id FROM segstack_%(segstack_id)s.segment_slice WHERE segment_id IN (%(segment_id)s)) ''' % {'segstack_id': segmentation_stack_id, 'segment_id': segment_id}) + \ _slice_select_query(segmentation_stack_id, ''' SELECT ss2.slice_id FROM segstack_%(segstack_id)s.segment_slice ss1 JOIN segstack_%(segstack_id)s.segment ss1_seg ON (ss1.segment_id = ss1_seg.id AND ss1_seg.section_sup = ( SELECT section_sup FROM segstack_%(segstack_id)s.segment WHERE id IN (%(segment_id)s) LIMIT 1)) JOIN segstack_%(segstack_id)s.segment_slice ss2 ON (ss2.segment_id = ss1.segment_id) WHERE ss1.slice_id IN (SELECT slice_id FROM req_seg_slices UNION SELECT scs_a.slice_a_id AS slice_id FROM segstack_%(segstack_id)s.slice_conflict scs_a, req_seg_slices WHERE scs_a.slice_b_id = req_seg_slices.slice_id UNION SELECT scs_b.slice_b_id AS slice_id FROM segstack_%(segstack_id)s.slice_conflict scs_b, req_seg_slices WHERE scs_b.slice_a_id = req_seg_slices.slice_id) ''' % {'segstack_id': segmentation_stack_id, 'segment_id': segment_id})) slices = _slicecursor_to_namedtuple(cursor) expanded_segment_ids = sum([ [summary['segment_id'] for summary in slice.segment_summaries] for slice in slices if slice.segment_summaries], []) segments = _retrieve_segments_by_ids(segstack.id, expanded_segment_ids) segment_list = [segment_dict(segment, with_solution=True) for segment in segments] slices_list = [slice_dict(slice, with_conflicts=True, with_solution=True) for slice in slices] return HttpResponse(json.dumps({'ok': True, 'segments': segment_list, 'slices': slices_list}), content_type='application/json') def set_feature_names(request, project_id=None, stack_id=None): s = get_object_or_404(Stack, pk=stack_id) p = get_object_or_404(Project, 
pk=project_id) names = [] try: names = safe_split(request.POST.get('names'), 'names') existing_names = get_feature_names(s, p) if existing_names == names: return HttpResponse(json.dumps({'ok': True}), content_type='application/json') else: return HttpResponse(json.dumps({'ok': False, 'reason' : 'tried to set different feature names'}), content_type='application/json') except FeatureInfo.DoesNotExist: if setup_feature_names(names, s, p): return HttpResponse(json.dumps({'ok': True}), content_type='application/json') else: return HttpResponse(json.dumps({'ok': False, 'reason' : 'something went horribly, horribly awry'}), content_type='application/json') def retrieve_feature_names(request, project_id=None, stack_id=None): s = get_object_or_404(Stack, pk=stack_id) p = get_object_or_404(Project, pk=project_id) names = get_feature_names(s, p) return HttpResponse(json.dumps({'names': names}), content_type='application/json') def get_segment_features(request, project_id=None, stack_id=None): s = get_object_or_404(Stack, pk=stack_id) segment_ids = map(hash_to_id, safe_split(request.POST.get('hash'), 'segment hashes')) features = SegmentFeatures.objects.filter(segment__in=segment_ids) return generate_features_response(features) def retrieve_segment_solutions(request, project_id=None, stack_id=None): segment_ids = map(hash_to_id, safe_split(request.POST.get('hash'), 'segment hashes')) core_id = int(request.POST.get('core_id')) solutions = SegmentSolution.objects.filter(core_id=core_id, segment__in=segment_ids) solution_dicts = [{'hash': id_to_hash(solution.segment.id), 'solution': solution.solution} for solution in solutions] return HttpResponse(json.dumps({'ok': True, 'solutions': solution_dicts}), content_type='application/json') def retrieve_block_ids_by_segments(request, project_id=None, stack_id=None): s = get_object_or_404(Stack, pk=stack_id) segment_ids = map(hash_to_id, safe_split(request.POST.get('hash'), 'segment hashes')) segments = Segment.objects.filter(stack=s, 
id__in=segment_ids) block_relations = SegmentBlockRelation.objects.filter(segment__in=segments) blocks = {br.block for br in block_relations} block_ids = [block.id for block in blocks] return HttpResponse(json.dumps({'ok': True, 'block_ids': block_ids}), content_type='application/json') @requires_user_role(UserRole.Annotate) def create_segment_for_slices(request, project_id, segmentation_stack_id): """Creates a segment joining a specified set of slices. Ends must specify section supremum.""" segstack = get_object_or_404(SegmentationStack, pk=segmentation_stack_id) try: slice_ids = map(hash_to_id, safe_split(request.POST.get('hash'), 'slice hashes')) segment = _create_segment_for_slices(segstack.id, slice_ids, request.POST.get('section_sup', None)) return generate_segment_response(segment) except ValidationError as ve: return HttpResponseBadRequest(json.dumps({'error': str(ve)}), content_type='application/json') except DuplicateSegmentException as dse: return HttpResponse(json.dumps({'error': str(dse)}), status=409, content_type='application/json') class DuplicateSegmentException(Exception): """Indicates a segment for a set of slices already exists.""" pass def _create_segment_for_slices(segmentation_stack_id, slice_ids, section_sup): """Creates a segment joining a specified set of slices. 
Ends must specify section supremum.""" if len(slice_ids) == 0: raise ValidationError('Must specify at least one slices for a segment') slices = _retrieve_slices_by_ids(segmentation_stack_id, slice_ids) if len(slices) != len(slice_ids): raise ValidationError('Segment refers to non-existent slices') sections = [x.section for x in slices] section_span = max(sections) - min(sections) if section_span > 1: raise ValidationError('Slices must be in adjacent sections') if section_span == 0 and len(slices) > 1: raise ValidationError('End segments must contain exactly one slice') if len(slices) > 3: raise ValidationError('SOPNET only supports branches of 1:2 slices') # Set segment section_sup # If continuation or branch, should be max(sections) # If an end, should be request param, otherwise invalid request if len(slices) == 1: if section_sup is None: raise ValidationError('End segments must specify section supremum') if section_sup < max(sections) or section_sup > max(sections) + 1: raise ValidationError('End segment section supremum must be slice section or next section') else: section_sup = max(sections) # Set segment extents extrema of slice extents min_x = min([x.min_x for x in slices]) min_y = min([x.min_y for x in slices]) max_x = min([x.max_x for x in slices]) max_y = min([x.max_y for x in slices]) # Get segment hash from SOPNET leftSliceHashes = pysopnet.SliceHashVector() leftSliceHashes.extend([long(id_to_hash(x.id)) for x in slices if x.section != section_sup]) rightSliceHashes = pysopnet.SliceHashVector() rightSliceHashes.extend([long(id_to_hash(x.id)) for x in slices if x.section == section_sup]) segment_hash = pysopnet.segmentHashValue(leftSliceHashes, rightSliceHashes) segment_id = hash_to_id(segment_hash) cursor = connection.cursor() cursor.execute('SELECT 1 FROM segstack_%s.segment WHERE id = %s LIMIT 1' % (segmentation_stack_id, segment_id)) if cursor.rowcount > 0: raise DuplicateSegmentException('Segment already exists with hash: %s id: %s' % (segment_hash, 
segment_id)) type = len(slices) - 1 # Create segment, associate slices to segment, and associate segment to blocks cursor.execute(''' INSERT INTO segstack_%(segstack_id)s.segment (id, section_sup, type, min_x, min_y, max_x, max_y) VALUES (%(segment_id)s, %(section_sup)s, %(type)s, %(min_x)s, %(min_y)s, %(max_x)s, %(max_y)s); INSERT INTO segstack_%(segstack_id)s.segment_slice (segment_id, slice_id, direction) SELECT seg.id, slice.id, slice.section <> %(section_sup)s FROM (VALUES (%(segment_id)s)) AS seg (id), (SELECT id, section FROM segstack_%(segstack_id)s.slice WHERE id IN (%(slice_ids)s)) AS slice (id); INSERT INTO segstack_%(segstack_id)s.segment_block_relation (segment_id, block_id) SELECT seg.id, sbr.block_id FROM (VALUES (%(segment_id)s)) AS seg (id), (SELECT DISTINCT block_id FROM segstack_%(segstack_id)s.slice_block_relation WHERE slice_id IN (%(slice_ids)s)) AS sbr; ''' % {'segstack_id': segmentation_stack_id, 'segment_id': segment_id, 'section_sup': section_sup, 'type': type, 'min_x': min_x, 'min_y': min_y, 'max_x': max_x, 'max_y': max_y, 'slice_ids': ','.join(map(str, slice_ids))}) segment = _retrieve_segments_by_ids(segmentation_stack_id, [segment_id])[0] return segment @requires_user_role([UserRole.Annotate, UserRole.Browse]) def retrieve_constraints(request, project_id, segmentation_stack_id): segstack = get_object_or_404(SegmentationStack, pk=segmentation_stack_id) segment_ids = ','.join([str(hash_to_id(x)) for x in safe_split(request.POST.get('hash'), 'segment hashes')]) cursor = connection.cursor() cursor.execute(''' SELECT c.id, c.relation, c.value, ARRAY_TO_JSON(ARRAY_AGG(ROW(csr2.segment_id, csr2.coefficient))) FROM segstack_%(segstack_id)s.solution_constraint c JOIN segstack_%(segstack_id)s.constraint_segment_relation csr1 ON (csr1.constraint_id = c.id) JOIN segstack_%(segstack_id)s.constraint_segment_relation csr2 ON (csr2.constraint_id = c.id) WHERE csr1.segment_id IN (%(segment_ids)s) GROUP BY c.id ''' % {'segstack_id': segstack.id, 
'segment_ids': segment_ids}) constraints = [{'id': row[0], 'relation': row[1], 'value': row[2], 'segments': [(id_to_hash(seg['f1']), seg['f2']) for seg in row[3]]} for row in cursor.fetchall()] return HttpResponse(json.dumps(constraints), content_type='application/json') @requires_user_role(UserRole.Annotate) def constrain_segment(request, project_id, segmentation_stack_id, segment_hash): segstack = get_object_or_404(SegmentationStack, pk=segmentation_stack_id) segment_id = hash_to_id(segment_hash) delete_conflicts = 'true' == request.POST.get('delete_conflicts', 'false') cursor = connection.cursor() cursor.execute('SELECT 1 FROM segstack_%s.segment WHERE id = %s LIMIT 1' % (segstack.id, segment_id)) if cursor.rowcount == 0: raise Http404('No segment exists with hash: %s id: %s' % (segment_hash, segment_id)) # Check if constraints exist on any segments in conflict with this # constraint. To be conservative, do not attempt to interpret the form of # any conflicting constraints (i.e., treat them all as conflicts, even # if the constraint may be compatible). cursor.execute(''' SELECT DISTINCT csr.constraint_id FROM segstack_{segstack_id}.constraint_segment_relation csr JOIN segstack_{segstack_id}.segment_slice ss1 ON (ss1.segment_id = csr.segment_id) JOIN segstack_{segstack_id}.slice_conflict sc ON (sc.slice_a_id = ss1.slice_id OR sc.slice_b_id = ss1.slice_id) JOIN segstack_{segstack_id}.segment_slice ss2 ON ((ss2.slice_id = sc.slice_a_id OR ss2.slice_id = sc.slice_b_id) AND ss2.segment_id <> ss1.segment_id) WHERE ss2.segment_id = %s '''.format(segstack_id=segstack.id), (segment_id,)) conflicting_constraint_ids = [row[0] for row in cursor.fetchall()] conflicting_constraint_ids_str = ','.join(map(str, conflicting_constraint_ids)) if conflicting_constraint_ids: if delete_conflicts: # Verify that all conflicting constraints are simple, # single-segment fixed assignments. 
cursor.execute(''' SELECT c.id, c.relation, c.value, COUNT(csr.*) AS segment_count, SUM(ABS(csr.coefficient)) AS segment_abs_sum FROM segstack_%(segstack_id)s.solution_constraint c JOIN segstack_%(segstack_id)s.constraint_segment_relation csr ON (csr.constraint_id = c.id) WHERE c.id IN (%(conflicting_constraint_ids)s) GROUP BY c.id HAVING c.relation <> 'Equal'::constraintrelation OR c.value <> 1 OR COUNT(csr.*) <> 1 OR SUM(ABS(csr.coefficient)) <> 1 ''' % {'segstack_id': segstack.id, 'conflicting_constraint_ids': conflicting_constraint_ids_str}) non_trivial_conflicts = cursor.rowcount if not non_trivial_conflicts == 0: non_trivial_conflict_ids = [row[0] for row in cursor.fetchall()] raise ValidationError('Can not delete conflicting non-trivial constraints: %s' % ','.join(map(str, non_trivial_conflict_ids))) cursor.execute(''' DELETE FROM segstack_%(segstack_id)s.block_constraint_relation bcr WHERE bcr.constraint_id IN (%(conflicting_constraint_ids)s); DELETE FROM segstack_%(segstack_id)s.correction c WHERE c.constraint_id IN (%(conflicting_constraint_ids)s); DELETE FROM segstack_%(segstack_id)s.constraint_segment_relation csr WHERE csr.constraint_id IN (%(conflicting_constraint_ids)s); DELETE FROM segstack_%(segstack_id)s.solution_constraint c WHERE c.id IN (%(conflicting_constraint_ids)s); ''' % {'segstack_id': segstack.id, 'conflicting_constraint_ids': conflicting_constraint_ids_str}) else: raise ValidationError('Conflicting constraints exist: %s' % conflicting_constraint_ids_str) cursor.execute(''' WITH solconstraint AS ( INSERT INTO segstack_%(segstack_id)s.solution_constraint (user_id, relation, value, creation_time, edition_time) VALUES (%(user_id)s, 'Equal', 1.0, current_timestamp, current_timestamp) RETURNING id) INSERT INTO segstack_%(segstack_id)s.constraint_segment_relation (segment_id, constraint_id, coefficient) (SELECT segment.id, solconstraint.id, 1.0 FROM (VALUES (%(segment_id)s)) AS segment (id), solconstraint) RETURNING constraint_id; ''' % 
{'segstack_id': segstack.id, 'segment_id': segment_id, 'user_id': request.user.id}) constraint_id = cursor.fetchone()[0] cursor.execute(''' INSERT INTO segstack_%(segstack_id)s.block_constraint_relation (constraint_id, block_id) (SELECT solconstraint.id, sbr.block_id FROM (VALUES (%(constraint_id)s)) AS solconstraint (id), (SELECT block_id FROM segstack_%(segstack_id)s.segment_block_relation WHERE segment_id = %(segment_id)s) AS sbr); ''' % {'segstack_id': segstack.id, 'segment_id': segment_id, 'constraint_id': constraint_id}) # Mark explicitly conflicting segments (segments with slices in conflict # sets with the constrained segment, or segments in the same section # with slices in common with the constrained segment) as mistakes being # corrected. The latter condition is needed to mark end segments, which # may not involve a conflicting slice. cursor.execute(''' WITH req_seg_slices AS ( SELECT slice_id, direction FROM segstack_%(segstack_id)s.segment_slice WHERE segment_id = %(segment_id)s) INSERT INTO segstack_%(segstack_id)s.correction (constraint_id, mistake_id) SELECT c.id, conflict.segment_id FROM (VALUES (%(constraint_id)s)) AS c (id), (SELECT DISTINCT aseg.segment_id AS segment_id FROM segstack_%(segstack_id)s.solution_precedence sp JOIN segstack_%(segstack_id)s.solution_assembly sola ON sola.solution_id = sp.solution_id JOIN segstack_%(segstack_id)s.assembly_segment aseg ON (aseg.assembly_id = sola.assembly_id AND aseg.segment_id <> %(segment_id)s) JOIN segstack_%(segstack_id)s.segment_slice ss ON (aseg.segment_id = ss.segment_id) WHERE ss.slice_id IN ( SELECT scs_a.slice_a_id AS slice_id FROM segstack_%(segstack_id)s.slice_conflict scs_a, req_seg_slices WHERE scs_a.slice_b_id = req_seg_slices.slice_id UNION SELECT scs_b.slice_b_id AS slice_id FROM segstack_%(segstack_id)s.slice_conflict scs_b, req_seg_slices WHERE scs_b.slice_a_id = req_seg_slices.slice_id) OR ((ss.slice_id, ss.direction) IN (SELECT * FROM req_seg_slices))) AS conflict ''' % 
{'segstack_id': segstack.id, 'segment_id': segment_id, 'constraint_id': constraint_id}) return HttpResponse(json.dumps({'constraint_id': constraint_id, 'conflicting_constraint_ids': conflicting_constraint_ids}), content_type='application/json') @requires_user_role([UserRole.Annotate, UserRole.Browse]) def retrieve_user_constraints_by_blocks(request, project_id=None, stack_id=None): block_ids = [int(id) for id in safe_split(request.POST.get('block_ids'), 'block IDs')] cursor = connection.cursor() cursor.execute(''' SELECT csr.constraint_id, array_agg(csr.segment_id) as segment_ids FROM djsopnet_blockconstraintrelation bcr JOIN djsopnet_constraintsegmentrelation csr ON bcr.constraint_id = csr.constraint_id WHERE bcr.block_id IN (%s) GROUP BY csr.constraint_id ''' % ','.join(map(str, block_ids))) constraints = cursor.fetchall() return HttpResponse(json.dumps({'ok': True, 'constraints': constraints}), content_type='application/json')
gpl-3.0
thinkopensolutions/geraldo
site/newsite/django_1_0/django/utils/_os.py
27
1041
from os.path import join, normcase, abspath, sep def safe_join(base, *paths): """ Joins one or more path components to the base path component intelligently. Returns a normalized, absolute version of the final path. The final path must be located inside of the base path component (otherwise a ValueError is raised). """ # We need to use normcase to ensure we don't false-negative on case # insensitive operating systems (like Windows). final_path = normcase(abspath(join(base, *paths))) base_path = normcase(abspath(base)) base_path_len = len(base_path) # Ensure final_path starts with base_path and that the next character after # the final path is os.sep (or nothing, in which case final_path must be # equal to base_path). if not final_path.startswith(base_path) \ or final_path[base_path_len:base_path_len+1] not in ('', sep): raise ValueError('the joined path is located outside of the base path' ' component') return final_path
lgpl-3.0
akionakamura/scikit-learn
sklearn/linear_model/tests/test_coordinate_descent.py
40
23697
# Authors: Olivier Grisel <olivier.grisel@ensta.org> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause from sys import version_info import numpy as np from scipy import interpolate, sparse from copy import deepcopy from sklearn.datasets import load_boston from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import TempMemmap from sklearn.linear_model.coordinate_descent import Lasso, \ LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \ MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path from sklearn.linear_model import LassoLarsCV, lars_path def check_warnings(): if version_info < (2, 6): raise SkipTest("Testing for warnings is not supported in versions \ older than Python 2.6") def test_lasso_zero(): # Check that the lasso can handle zero data without crashing X = [[0], [0], [0]] y = [0, 0, 0] clf = Lasso(alpha=0.1).fit(X, y) pred = clf.predict([[1], [2], [3]]) assert_array_almost_equal(clf.coef_, [0]) assert_array_almost_equal(pred, [0, 0, 0]) assert_almost_equal(clf.dual_gap_, 0) def test_lasso_toy(): # Test Lasso on a toy example for various values of alpha. # When validating this against glmnet notice that glmnet divides it # against nobs. 
X = [[-1], [0], [1]] Y = [-1, 0, 1] # just a straight line T = [[2], [3], [4]] # test sample clf = Lasso(alpha=1e-8) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = Lasso(alpha=0.1) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.85]) assert_array_almost_equal(pred, [1.7, 2.55, 3.4]) assert_almost_equal(clf.dual_gap_, 0) clf = Lasso(alpha=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.25]) assert_array_almost_equal(pred, [0.5, 0.75, 1.]) assert_almost_equal(clf.dual_gap_, 0) clf = Lasso(alpha=1) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.0]) assert_array_almost_equal(pred, [0, 0, 0]) assert_almost_equal(clf.dual_gap_, 0) def test_enet_toy(): # Test ElasticNet for various parameters of alpha and l1_ratio. # Actually, the parameters alpha = 0 should not be allowed. However, # we test it as a border case. 
# ElasticNet is tested with and without precomputed Gram matrix X = np.array([[-1.], [0.], [1.]]) Y = [-1, 0, 1] # just a straight line T = [[2.], [3.], [4.]] # test sample # this should be the same as lasso clf = ElasticNet(alpha=1e-8, l1_ratio=1.0) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100, precompute=False) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf.set_params(max_iter=100, precompute=True) clf.fit(X, Y) # with Gram pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf.set_params(max_iter=100, precompute=np.dot(X.T, X)) clf.fit(X, Y) # with Gram pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.45454], 3) assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) assert_almost_equal(clf.dual_gap_, 0) def build_dataset(n_samples=50, n_features=200, n_informative_features=10, n_targets=1): """ build an ill-posed linear regression problem with many noisy features and comparatively few samples """ random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) return X, y, X_test, y_test def 
test_lasso_cv(): X, y, X_test, y_test = build_dataset() max_iter = 150 clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y) assert_almost_equal(clf.alpha_, 0.056, 2) clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True) clf.fit(X, y) assert_almost_equal(clf.alpha_, 0.056, 2) # Check that the lars and the coordinate descent implementation # select a similar alpha lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y) # for this we check that they don't fall in the grid of # clf.alphas further than 1 assert_true(np.abs( np.searchsorted(clf.alphas_[::-1], lars.alpha_) - np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1) # check that they also give a similar MSE mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T) np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), significant=2) # test set assert_greater(clf.score(X_test, y_test), 0.99) def test_lasso_cv_positive_constraint(): X, y, X_test, y_test = build_dataset() max_iter = 500 # Ensure the unconstrained fit has a negative coefficient clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) clf_unconstrained.fit(X, y) assert_true(min(clf_unconstrained.coef_) < 0) # On same data, constrained fit has non-negative coefficients clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, positive=True, cv=2, n_jobs=1) clf_constrained.fit(X, y) assert_true(min(clf_constrained.coef_) >= 0) def test_lasso_path_return_models_vs_new_return_gives_same_coefficients(): # Test that lasso_path with lars_path style output gives the # same result # Some toy data X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T y = np.array([1, 2, 3.1]) alphas = [5., 1., .5] # Use lars_path and lasso_path(new output) with 1D linear interpolation # to compute the the same path alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso') coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1], coef_path_lars[:, ::-1]) 
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas, return_models=False) coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1], coef_path_lasso2[:, ::-1]) assert_array_almost_equal( coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas), decimal=1) def test_enet_path(): # We use a large number of samples and of informative features so that # the l1_ratio selected is more toward ridge than lasso X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100, n_informative_features=100) max_iter = 150 # Here we have a small number of iterations, and thus the # ElasticNet might not converge. This is to speed up tests clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter) ignore_warnings(clf.fit)(X, y) # Well-conditioned settings, we should have selected our # smallest penalty assert_almost_equal(clf.alpha_, min(clf.alphas_)) # Non-sparse ground truth: we should have seleted an elastic-net # that is closer to ridge than to lasso assert_equal(clf.l1_ratio_, min(clf.l1_ratio)) clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter, precompute=True) ignore_warnings(clf.fit)(X, y) # Well-conditioned settings, we should have selected our # smallest penalty assert_almost_equal(clf.alpha_, min(clf.alphas_)) # Non-sparse ground truth: we should have seleted an elastic-net # that is closer to ridge than to lasso assert_equal(clf.l1_ratio_, min(clf.l1_ratio)) # We are in well-conditioned settings with low noise: we should # have a good test-set performance assert_greater(clf.score(X_test, y_test), 0.99) # Multi-output/target case X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3) clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter) ignore_warnings(clf.fit)(X, y) # We are in well-conditioned settings with low noise: we should # have a good test-set performance assert_greater(clf.score(X_test, y_test), 
0.99) assert_equal(clf.coef_.shape, (3, 10)) # Mono-output should have same cross-validated alpha_ and l1_ratio_ # in both cases. X, y, _, _ = build_dataset(n_features=10) clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf1.fit(X, y) clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf2.fit(X, y[:, np.newaxis]) assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_) assert_almost_equal(clf1.alpha_, clf2.alpha_) def test_path_parameters(): X, y, _, _ = build_dataset() max_iter = 100 clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter, l1_ratio=0.5, tol=1e-3) clf.fit(X, y) # new params assert_almost_equal(0.5, clf.l1_ratio) assert_equal(50, clf.n_alphas) assert_equal(50, len(clf.alphas_)) def test_warm_start(): X, y, _, _ = build_dataset() clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True) ignore_warnings(clf.fit)(X, y) ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations clf2 = ElasticNet(alpha=0.1, max_iter=10) ignore_warnings(clf2.fit)(X, y) assert_array_almost_equal(clf2.coef_, clf.coef_) def test_lasso_alpha_warning(): X = [[-1], [0], [1]] Y = [-1, 0, 1] # just a straight line clf = Lasso(alpha=0) assert_warns(UserWarning, clf.fit, X, Y) def test_lasso_positive_constraint(): X = [[-1], [0], [1]] y = [1, 0, -1] # just a straight line with negative slope lasso = Lasso(alpha=0.1, max_iter=1000, positive=True) lasso.fit(X, y) assert_true(min(lasso.coef_) >= 0) lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True) lasso.fit(X, y) assert_true(min(lasso.coef_) >= 0) def test_enet_positive_constraint(): X = [[-1], [0], [1]] y = [1, 0, -1] # just a straight line with negative slope enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True) enet.fit(X, y) assert_true(min(enet.coef_) >= 0) def test_enet_cv_positive_constraint(): X, y, X_test, y_test = build_dataset() max_iter = 500 # Ensure the unconstrained fit has a negative coefficient enetcv_unconstrained = ElasticNetCV(n_alphas=3, 
eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) enetcv_unconstrained.fit(X, y) assert_true(min(enetcv_unconstrained.coef_) < 0) # On same data, constrained fit has non-negative coefficients enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1) enetcv_constrained.fit(X, y) assert_true(min(enetcv_constrained.coef_) >= 0) def test_uniform_targets(): enet = ElasticNetCV(fit_intercept=True, n_alphas=3) m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3) lasso = LassoCV(fit_intercept=True, n_alphas=3) m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3) models_single_task = (enet, lasso) models_multi_task = (m_enet, m_lasso) rng = np.random.RandomState(0) X_train = rng.random_sample(size=(10, 3)) X_test = rng.random_sample(size=(10, 3)) y1 = np.empty(10) y2 = np.empty((10, 2)) for model in models_single_task: for y_values in (0, 5): y1.fill(y_values) assert_array_equal(model.fit(X_train, y1).predict(X_test), y1) assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3) for model in models_multi_task: for y_values in (0, 5): y2[:, 0].fill(y_values) y2[:, 1].fill(2 * y_values) assert_array_equal(model.fit(X_train, y2).predict(X_test), y2) assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3) def test_multi_task_lasso_and_enet(): X, y, X_test, y_test = build_dataset() Y = np.c_[y, y] # Y_test = np.c_[y_test, y_test] clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) assert_true(0 < clf.dual_gap_ < 1e-5) assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y) assert_true(0 < clf.dual_gap_ < 1e-5) assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) def test_lasso_readonly_data(): X = np.array([[-1], [0], [1]]) Y = np.array([-1, 0, 1]) # just a straight line T = np.array([[2], [3], [4]]) # test sample with TempMemmap((X, Y)) as (X, Y): clf = Lasso(alpha=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.25]) 
assert_array_almost_equal(pred, [0.5, 0.75, 1.]) assert_almost_equal(clf.dual_gap_, 0) def test_multi_task_lasso_readonly_data(): X, y, X_test, y_test = build_dataset() Y = np.c_[y, y] with TempMemmap((X, Y)) as (X, Y): Y = np.c_[y, y] clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) assert_true(0 < clf.dual_gap_ < 1e-5) assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) def test_enet_multitarget(): n_targets = 3 X, y, _, _ = build_dataset(n_samples=10, n_features=8, n_informative_features=10, n_targets=n_targets) estimator = ElasticNet(alpha=0.01, fit_intercept=True) estimator.fit(X, y) coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_, estimator.dual_gap_) for k in range(n_targets): estimator.fit(X, y[:, k]) assert_array_almost_equal(coef[k, :], estimator.coef_) assert_array_almost_equal(intercept[k], estimator.intercept_) assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) def test_multioutput_enetcv_error(): X = np.random.randn(10, 2) y = np.random.randn(10, 2) clf = ElasticNetCV() assert_raises(ValueError, clf.fit, X, y) def test_multitask_enet_and_lasso_cv(): X, y, _, _ = build_dataset(n_features=100, n_targets=3) clf = MultiTaskElasticNetCV().fit(X, y) assert_almost_equal(clf.alpha_, 0.00556, 3) clf = MultiTaskLassoCV().fit(X, y) assert_almost_equal(clf.alpha_, 0.00278, 3) X, y, _, _ = build_dataset(n_targets=3) clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100, l1_ratio=[0.3, 0.5], tol=1e-3) clf.fit(X, y) assert_equal(0.5, clf.l1_ratio_) assert_equal((3, X.shape[1]), clf.coef_.shape) assert_equal((3, ), clf.intercept_.shape) assert_equal((2, 50, 3), clf.mse_path_.shape) assert_equal((2, 50), clf.alphas_.shape) X, y, _, _ = build_dataset(n_targets=3) clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3) clf.fit(X, y) assert_equal((3, X.shape[1]), clf.coef_.shape) assert_equal((3, ), clf.intercept_.shape) assert_equal((50, 3), clf.mse_path_.shape) assert_equal(50, len(clf.alphas_)) def 
test_1d_multioutput_enet_and_multitask_enet_cv(): X, y, _, _ = build_dataset(n_features=10) y = y[:, np.newaxis] clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf.fit(X, y[:, 0]) clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf1.fit(X, y) assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_) assert_almost_equal(clf.alpha_, clf1.alpha_) assert_almost_equal(clf.coef_, clf1.coef_[0]) assert_almost_equal(clf.intercept_, clf1.intercept_[0]) def test_1d_multioutput_lasso_and_multitask_lasso_cv(): X, y, _, _ = build_dataset(n_features=10) y = y[:, np.newaxis] clf = LassoCV(n_alphas=5, eps=2e-3) clf.fit(X, y[:, 0]) clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3) clf1.fit(X, y) assert_almost_equal(clf.alpha_, clf1.alpha_) assert_almost_equal(clf.coef_, clf1.coef_[0]) assert_almost_equal(clf.intercept_, clf1.intercept_[0]) def test_sparse_input_dtype_enet_and_lassocv(): X, y, _, _ = build_dataset(n_features=10) clf = ElasticNetCV(n_alphas=5) clf.fit(sparse.csr_matrix(X), y) clf1 = ElasticNetCV(n_alphas=5) clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y) assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) clf = LassoCV(n_alphas=5) clf.fit(sparse.csr_matrix(X), y) clf1 = LassoCV(n_alphas=5) clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y) assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) def test_precompute_invalid_argument(): X, y, _, _ = build_dataset() for clf in [ElasticNetCV(precompute="invalid"), LassoCV(precompute="invalid")]: assert_raises(ValueError, clf.fit, X, y) def test_warm_start_convergence(): X, y, _, _ = build_dataset() model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y) n_iter_reference = model.n_iter_ # This dataset is not trivial enough for the model to converge in one pass. 
assert_greater(n_iter_reference, 2) # Check that n_iter_ is invariant to multiple calls to fit # when warm_start=False, all else being equal. model.fit(X, y) n_iter_cold_start = model.n_iter_ assert_equal(n_iter_cold_start, n_iter_reference) # Fit the same model again, using a warm start: the optimizer just performs # a single pass before checking that it has already converged model.set_params(warm_start=True) model.fit(X, y) n_iter_warm_start = model.n_iter_ assert_equal(n_iter_warm_start, 1) def test_warm_start_convergence_with_regularizer_decrement(): boston = load_boston() X, y = boston.data, boston.target # Train a model to converge on a lightly regularized problem final_alpha = 1e-5 low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y) # Fitting a new model on a more regularized version of the same problem. # Fitting with high regularization is easier it should converge faster # in general. high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y) assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_) # Fit the solution to the original, less regularized version of the # problem but from the solution of the highly regularized variant of # the problem as a better starting point. This should also converge # faster than the original model that starts from zero. warm_low_reg_model = deepcopy(high_reg_model) warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha) warm_low_reg_model.fit(X, y) assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_) def test_random_descent(): # Test that both random and cyclic selection give the same results. # Ensure that the test models fully converge and check a wide # range of conditions. # This uses the coordinate descent algo using the gram trick. 
X, y, _, _ = build_dataset(n_samples=50, n_features=20) clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(X, y) clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(X, y) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # This uses the descent algo without the gram trick clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(X.T, y[:20]) clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(X.T, y[:20]) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # Sparse Case clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(sparse.csr_matrix(X), y) clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(sparse.csr_matrix(X), y) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # Multioutput case. new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(X, new_y) clf_random = MultiTaskElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(X, new_y) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # Raise error when selection is not in cyclic or random. clf_random = ElasticNet(selection='invalid') assert_raises(ValueError, clf_random.fit, X, y) def test_deprection_precompute_enet(): # Test that setting precompute="auto" gives a Deprecation Warning. 
X, y, _, _ = build_dataset(n_samples=20, n_features=10) clf = ElasticNet(precompute="auto") assert_warns(DeprecationWarning, clf.fit, X, y) clf = Lasso(precompute="auto") assert_warns(DeprecationWarning, clf.fit, X, y) def test_enet_path_positive(): # Test that the coefs returned by positive=True in enet_path are positive X, y, _, _ = build_dataset(n_samples=50, n_features=50) for path in [enet_path, lasso_path]: pos_path_coef = path(X, y, positive=True)[1] assert_true(np.all(pos_path_coef >= 0)) def test_sparse_dense_descent_paths(): # Test that dense and sparse input give the same input for descent paths. X, y, _, _ = build_dataset(n_samples=50, n_features=20) csr = sparse.csr_matrix(X) for path in [enet_path, lasso_path]: _, coefs, _ = path(X, y, fit_intercept=False) _, sparse_coefs, _ = path(csr, y, fit_intercept=False) assert_array_almost_equal(coefs, sparse_coefs)
bsd-3-clause
M4rtinK/modrana
core/bundle/upoints/weather_stations.py
2
12056
# # vim: set sw=4 sts=4 et tw=80 fileencoding=utf-8: # """weather_stations - Imports weather station data files""" # Copyright (C) 2007-2010 James Rowe # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # __doc__ += """. .. moduleauthor:: James Rowe <jnrowe@gmail.com> .. versionadded:: 0.2.0 """ import logging from . import (point, trigpoints, utils) class Station(trigpoints.Trigpoint): """Class for representing a weather station from a NOAA data file .. versionadded:: 0.2.0 """ __slots__ = ('alt_id', 'state', 'country', 'wmo', 'ua_latitude', 'ua_longitude', 'ua_altitude', 'rbsn') def __init__(self, alt_id, name, state, country, wmo, latitude, longitude, ua_latitude, ua_longitude, altitude, ua_altitude, rbsn): """Initialise a new ``Station`` object >>> Station('EGLL', 'London / Heathrow Airport', None, ... 'United Kingdom', 6, 51.4833333333, -0.45, None, None, 24, ... 
0, True) Station('EGLL', 'London / Heathrow Airport', None, 'United Kingdom', 6, 51.4833333333, -0.45, None, None, 24, 0, True) :type alt_id: ``str`` or ``None`` :param alt_id: Alternate location identifier :type name: ``str`` :param name: Station's name :type state: ``str`` or ``None`` :param state: State name, if station is in the US :type country: ``str`` :param country: Country name :type wmo: ``int`` :param wmo: WMO region code :type latitude: ``float`` :param latitude: Station's latitude :type longitude: ``float`` :param longitude: Station's longitude :type ua_latitude: ``float`` or ``None`` :param ua_latitude: Station's upper air latitude :type ua_longitude: ``float`` or ``None`` :param ua_longitude: Station's upper air longitude :type altitude: ``int`` or ``None`` :param altitude: Station's elevation :type ua_altitude: ``int`` or ``None`` :param ua_altitude: Station's upper air elevation :type rbsn: ``bool`` :param rbsn: True if station belongs to RSBN """ super(Station, self).__init__(latitude, longitude, altitude, name) self.alt_id = alt_id self.state = state self.country = country self.wmo = wmo self.ua_latitude = ua_latitude self.ua_longitude = ua_longitude self.ua_altitude = ua_altitude self.rbsn = rbsn def __str__(self, mode="dd"): """Pretty printed location string .. seealso:: :type :class:`trigpoints.point.Point` >>> Heathrow = Station("EGLL", "London / Heathrow Airport", None, ... "United Kingdom", 6, 51.048333, -0.450000, None, ... 
None, 24, 0, True) >>> print(Heathrow) London / Heathrow Airport (EGLL - N51.048°; W000.450°) >>> print(Heathrow.__str__(mode="dms")) London / Heathrow Airport (EGLL - 51°02'53"N, 000°27'00"W) >>> print(Heathrow.__str__(mode="dm")) London / Heathrow Airport (EGLL - 51°02.90'N, 000°27.00'W) >>> Heathrow.alt_id = None >>> print(Heathrow) London / Heathrow Airport (N51.048°; W000.450°) :type mode: ``str`` :param mode: Coordinate formatting system to use :rtype: ``str`` :return: Human readable string representation of ``Station`` object """ text = super(Station.__base__, self).__str__(mode) if self.alt_id: return "%s (%s - %s)" % (self.name, self.alt_id, text) else: return "%s (%s)" % (self.name, text) class Stations(point.KeyedPoints): """Class for representing a group of `Station` objects .. versionadded:: 0.5.1 """ def __init__(self, data=None, index="WMO"): """Initialise a new `Stations` object""" super(Stations, self).__init__() self._data = data self._index = index if data: self.import_locations(data, index) def import_locations(self, data, index="WMO"): """Parse NOAA weather station data files ``import_locations()`` returns a dictionary with keys containing either the WMO or ICAO identifier, and values that are ``Station`` objects that describes the large variety of data exported by NOAA_. It expects data files in one of the following formats:: 00;000;PABL;Buckland, Buckland Airport;AK;United States;4;65-58-56N;161-09-07W;;;7;; 01;001;ENJA;Jan Mayen;;Norway;6;70-56N;008-40W;70-56N;008-40W;10;9;P 01;002;----;Grahuken;;Norway;6;79-47N;014-28E;;;;15; or:: AYMD;94;014;Madang;;Papua New Guinea;5;05-13S;145-47E;05-13S;145-47E;3;5;P AYMO;--;---;Manus Island/Momote;;Papua New Guinea;5;02-03-43S;147-25-27E;;;4;; AYPY;94;035;Moresby;;Papua New Guinea;5;09-26S;147-13E;09-26S;147-13E;38;49;P Files containing the data in this format can be downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)`'s site in their `station location page`_. 
WMO indexed files downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)` site when processed by ``import_locations()`` will return ``dict`` object of the following style:: {'00000': Station('PABL', 'Buckland, Buckland Airport', 'AK', 'United States', 4, 65.982222. -160.848055, None, None, 7, False), '01001'; Station('ENJA', Jan Mayen, None, 'Norway', 6, 70.933333, -7.333333, 70.933333, -7.333333, 10, 9, True), '01002': Station(None, 'Grahuken', None, 'Norway', 6, 79.783333, 13.533333, None, None, 15, False)} And ``dict`` objects such as the following will be created when ICAO indexed data files are processed:: {'AYMD': Station("94", "014", "Madang", None, "Papua New Guinea", 5, -5.216666, 145.783333, -5.216666, 145.78333333333333, 3, 5, True, 'AYMO': Station(None, None, "Manus Island/Momote", None, "Papua New Guinea", 5, -2.061944, 147.424166, None, None, 4, False, 'AYPY': Station("94", "035", "Moresby", None, "Papua New Guinea", 5, -9.433333, 147.216667, -9.433333, 147.216667, 38, 49, True} >>> stations = Stations(open("WMO_stations")) >>> for key, value in sorted(stations.items()): ... print("%s - %s" % (key, value)) 00000 - Buckland, Buckland Airport (PABL - N65.982°; W161.152°) 01001 - Jan Mayen (ENJA - N70.933°; W008.667°) 01002 - Grahuken (N79.783°; E014.467°) >>> stations = Stations(open("ICAO_stations"), "ICAO") >>> for key, value in sorted(stations.items()): ... print("%s - %s" % (key, value)) AYMD - Madang (94014 - S05.217°; E145.783°) AYMO - Manus Island/Momote (S02.062°; E147.424°) AYPY - Moresby (94035 - S09.433°; E147.217°) >>> stations = Stations(open("broken_WMO_stations")) >>> for key, value in sorted(stations.items()): ... print("%s - %s" % (key, value)) 71046 - Komakuk Beach, Y. T. (CWKM - N69.617°; W140.200°) 71899 - Langara, B. C. (CWLA - N54.250°; W133.133°) >>> stations = Stations(open("broken_ICAO_stations"), "ICAO") >>> for key, value in sorted(stations.items()): ... 
print("%s - %s" % (key, value)) KBRX - Bordeaux (N41.933°; W104.950°) KCQB - Chandler, Chandler Municipal Airport (N35.724°; W096.820°) KTYR - Tyler, Tyler Pounds Field (N32.359°; W095.404°) :type data: ``file``, ``list`` or ``str`` :param data: NOAA station data to read :type index: ``str`` :param index: The identifier type used in the file :rtype: ``dict`` :return: WMO locations with `Station` objects :raise FileFormatError: Unknown file format .. _NOAA: http://weather.noaa.gov/ .. _station location page: http://weather.noaa.gov/tg/site.shtml """ self._data = data data = utils.prepare_read(data) for line in data: line = line.strip() chunk = line.split(";") if not len(chunk) == 14: if index == "ICAO": # Some entries only have 12 or 13 elements, so we assume 13 # and 14 are None. Of the entries I've hand checked this # assumption would be correct. logging.debug("Extending ICAO `%s' entry, because it is " "too short to process" % line) chunk.extend(["", ""]) elif index == "WMO" and len(chunk) == 13: # A few of the WMO indexed entries are missing their RBSN # fields, hand checking the entries for 71046 and 71899 # shows that they are correct if we just assume RBSN is # false. logging.debug("Extending WMO `%s' entry, because it is " "too short to process" % line) chunk.append("") else: raise utils.FileFormatError("NOAA") if index == "WMO": identifier = "".join(chunk[:2]) alt_id = chunk[2] elif index == "ICAO": identifier = chunk[0] alt_id = "".join(chunk[1:3]) else: raise ValueError("Unknown format `%s'" % index) if alt_id in ("----", "-----"): alt_id = None name = chunk[3] state = chunk[4] if chunk[4] else None country = chunk[5] wmo = int(chunk[6]) if chunk[6] else None point_data = [] for i in chunk[7:11]: if not i: point_data.append(None) continue # Some entries in nsd_cccc.txt are of the format "DD-MM- # N", so we just take the spaces to mean 0 seconds. 
if " " in i: logging.debug("Fixing unpadded location data in `%s' entry" % line) i = i.replace(" ", "0") values = map(int, i[:-1].split("-")) if i[-1] in ("S", "W"): values = [-i for i in values] point_data.append(point.utils.to_dd(*values)) latitude, longitude, ua_latitude, ua_longitude = point_data altitude = int(chunk[11]) if chunk[11] else None ua_altitude = int(chunk[12]) if chunk[12] else None rbsn = False if not chunk[13] else True self[identifier] = Station(alt_id, name, state, country, wmo, latitude, longitude, ua_latitude, ua_longitude, altitude, ua_altitude, rbsn)
gpl-3.0
jcftang/ansible
lib/ansible/modules/network/cumulus/cl_ports.py
27
7194
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com> # # This file is part of Ansible # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: cl_ports version_added: "2.1" author: "Cumulus Networks (@CumulusNetworks)" short_description: Configure Cumulus Switch port attributes (ports.conf) description: - Set the initial port attribute defined in the Cumulus Linux ports.conf, file. This module does not do any error checking at the moment. Be careful to not include ports that do not exist on the switch. Carefully read the original ports.conf file for any exceptions or limitations. For more details go the Configure Switch Port Attribute Documentation at U(http://docs.cumulusnetworks.com). options: speed_10g: description: - List of ports to run initial run at 10G. speed_40g: description: - List of ports to run initial run at 40G. speed_4_by_10g: description: - List of 40G ports that will be unganged to run as 4 10G ports. speed_40g_div_4: description: - List of 10G ports that will be ganged to form a 40G port. 
''' EXAMPLES = ''' # Use cl_ports module to manage the switch attributes defined in the # ports.conf file on Cumulus Linux ## Unganged port configuration on certain ports - name: configure ports.conf setup cl_ports: speed_4_by_10g: - swp1 - swp32 speed_40g: - swp2-31 ## Unganged port configuration on certain ports - name: configure ports.conf setup cl_ports: speed_4_by_10g: - swp1-3 - swp6 speed_40g: - swp4-5 - swp7-32 ''' RETURN = ''' changed: description: whether the interface was changed returned: changed type: bool sample: True msg: description: human-readable report of success or failure returned: always type: string sample: "interface bond0 config updated" ''' PORTS_CONF = '/etc/cumulus/ports.conf' def hash_existing_ports_conf(module): module.ports_conf_hash = {} if not os.path.exists(PORTS_CONF): return False try: existing_ports_conf = open(PORTS_CONF).readlines() except IOError: error_msg = get_exception() _msg = "Failed to open %s: %s" % (PORTS_CONF, error_msg) module.fail_json(msg=_msg) return # for testing only should return on module.fail_json for _line in existing_ports_conf: _m0 = re.match(r'^(\d+)=(\w+)', _line) if _m0: _portnum = int(_m0.group(1)) _speed = _m0.group(2) module.ports_conf_hash[_portnum] = _speed def generate_new_ports_conf_hash(module): new_ports_conf_hash = {} convert_hash = { 'speed_40g_div_4': '40G/4', 'speed_4_by_10g': '4x10G', 'speed_10g': '10G', 'speed_40g': '40G' } for k in module.params.keys(): port_range = module.params[k] port_setting = convert_hash[k] if port_range: port_range = [x for x in port_range if x] for port_str in port_range: port_range_str = port_str.replace('swp', '').split('-') if len(port_range_str) == 1: new_ports_conf_hash[int(port_range_str[0])] = \ port_setting else: int_range = map(int, port_range_str) portnum_range = range(int_range[0], int_range[1]+1) for i in portnum_range: new_ports_conf_hash[i] = port_setting module.new_ports_hash = new_ports_conf_hash def compare_new_and_old_port_conf_hash(module): 
ports_conf_hash_copy = module.ports_conf_hash.copy() module.ports_conf_hash.update(module.new_ports_hash) port_num_length = len(module.ports_conf_hash.keys()) orig_port_num_length = len(ports_conf_hash_copy.keys()) if port_num_length != orig_port_num_length: module.fail_json(msg="Port numbering is wrong. \ Too many or two few ports configured") return False elif ports_conf_hash_copy == module.ports_conf_hash: return False return True def make_copy_of_orig_ports_conf(module): if os.path.exists(PORTS_CONF + '.orig'): return try: shutil.copyfile(PORTS_CONF, PORTS_CONF + '.orig') except IOError: error_msg = get_exception() _msg = "Failed to save the original %s: %s" % (PORTS_CONF, error_msg) module.fail_json(msg=_msg) return # for testing only def write_to_ports_conf(module): """ use tempfile to first write out config in temp file then write to actual location. may help prevent file corruption. Ports.conf is a critical file for Cumulus. Don't want to corrupt this file under any circumstance. 
""" temp = tempfile.NamedTemporaryFile() try: try: temp.write('# Managed By Ansible\n') for k in sorted(module.ports_conf_hash.keys()): port_setting = module.ports_conf_hash[k] _str = "%s=%s\n" % (k, port_setting) temp.write(_str) temp.seek(0) shutil.copyfile(temp.name, PORTS_CONF) except IOError: error_msg = get_exception() module.fail_json( msg="Failed to write to %s: %s" % (PORTS_CONF, error_msg)) finally: temp.close() def main(): module = AnsibleModule( argument_spec=dict( speed_40g_div_4=dict(type='list'), speed_4_by_10g=dict(type='list'), speed_10g=dict(type='list'), speed_40g=dict(type='list') ), required_one_of=[['speed_40g_div_4', 'speed_4_by_10g', 'speed_10g', 'speed_40g']] ) _changed = False hash_existing_ports_conf(module) generate_new_ports_conf_hash(module) if compare_new_and_old_port_conf_hash(module): make_copy_of_orig_ports_conf(module) write_to_ports_conf(module) _changed = True _msg = "/etc/cumulus/ports.conf changed" else: _msg = 'No change in /etc/ports.conf' module.exit_json(changed=_changed, msg=_msg) # import module snippets from ansible.module_utils.basic import * # from ansible.module_utils.urls import * import os import tempfile import shutil if __name__ == '__main__': main()
gpl-3.0
Jenselme/servo
components/script/dom/bindings/codegen/parser/tests/test_attr_sequence_type.py
276
1626
def WebIDLTest(parser, harness): threw = False try: parser.parse(""" interface AttrSequenceType { attribute sequence<object> foo; }; """) results = parser.finish() except: threw = True harness.ok(threw, "Attribute type must not be a sequence type") parser.reset() threw = False try: parser.parse(""" interface AttrUnionWithSequenceType { attribute (sequence<object> or DOMString) foo; }; """) results = parser.finish() except: threw = True harness.ok(threw, "Attribute type must not be a union with a sequence member type") parser.reset() threw = False try: parser.parse(""" interface AttrNullableUnionWithSequenceType { attribute (sequence<object>? or DOMString) foo; }; """) results = parser.finish() except: threw = True harness.ok(threw, "Attribute type must not be a union with a nullable sequence " "member type") parser.reset() threw = False try: parser.parse(""" interface AttrUnionWithUnionWithSequenceType { attribute ((sequence<object> or DOMString) or AttrUnionWithUnionWithSequenceType) foo; }; """) results = parser.finish() except: threw = True harness.ok(threw, "Attribute type must not be a union type with a union member " "type that has a sequence member type")
mpl-2.0
sozoStudio/sozoStudio.github.io
FoguangTemple_web/node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py
1789
10585
# Copyright (c) 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Xcode-ninja wrapper project file generator. This updates the data structures passed to the Xcode gyp generator to build with ninja instead. The Xcode project itself is transformed into a list of executable targets, each with a build step to build with ninja, and a target with every source and resource file. This appears to sidestep some of the major performance headaches experienced using complex projects and large number of targets within Xcode. """ import errno import gyp.generator.ninja import os import re import xml.sax.saxutils def _WriteWorkspace(main_gyp, sources_gyp, params): """ Create a workspace to wrap main and sources gyp paths. """ (build_file_root, build_file_ext) = os.path.splitext(main_gyp) workspace_path = build_file_root + '.xcworkspace' options = params['options'] if options.generator_output: workspace_path = os.path.join(options.generator_output, workspace_path) try: os.makedirs(workspace_path) except OSError, e: if e.errno != errno.EEXIST: raise output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \ '<Workspace version = "1.0">\n' for gyp_name in [main_gyp, sources_gyp]: name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj' name = xml.sax.saxutils.quoteattr("group:" + name) output_string += ' <FileRef location = %s></FileRef>\n' % name output_string += '</Workspace>\n' workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata") try: with open(workspace_file, 'r') as input_file: input_string = input_file.read() if input_string == output_string: return except IOError: # Ignore errors if the file doesn't exist. pass with open(workspace_file, 'w') as output_file: output_file.write(output_string) def _TargetFromSpec(old_spec, params): """ Create fake target for xcode-ninja wrapper. """ # Determine ninja top level build dir (e.g. /path/to/out). 
ninja_toplevel = None jobs = 0 if params: options = params['options'] ninja_toplevel = \ os.path.join(options.toplevel_dir, gyp.generator.ninja.ComputeOutputDir(params)) jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0) target_name = old_spec.get('target_name') product_name = old_spec.get('product_name', target_name) product_extension = old_spec.get('product_extension') ninja_target = {} ninja_target['target_name'] = target_name ninja_target['product_name'] = product_name if product_extension: ninja_target['product_extension'] = product_extension ninja_target['toolset'] = old_spec.get('toolset') ninja_target['default_configuration'] = old_spec.get('default_configuration') ninja_target['configurations'] = {} # Tell Xcode to look in |ninja_toplevel| for build products. new_xcode_settings = {} if ninja_toplevel: new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \ "%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel if 'configurations' in old_spec: for config in old_spec['configurations'].iterkeys(): old_xcode_settings = \ old_spec['configurations'][config].get('xcode_settings', {}) if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings: new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO" new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \ old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] ninja_target['configurations'][config] = {} ninja_target['configurations'][config]['xcode_settings'] = \ new_xcode_settings ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0) ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0) ninja_target['ios_watchkit_extension'] = \ old_spec.get('ios_watchkit_extension', 0) ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0) ninja_target['type'] = old_spec['type'] if ninja_toplevel: ninja_target['actions'] = [ { 'action_name': 'Compile and copy %s via ninja' % target_name, 'inputs': [], 'outputs': [], 'action': [ 'env', 'PATH=%s' % os.environ['PATH'], 'ninja', '-C', 
new_xcode_settings['CONFIGURATION_BUILD_DIR'], target_name, ], 'message': 'Compile and copy %s via ninja' % target_name, }, ] if jobs > 0: ninja_target['actions'][0]['action'].extend(('-j', jobs)) return ninja_target def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec): """Limit targets for Xcode wrapper. Xcode sometimes performs poorly with too many targets, so only include proper executable targets, with filters to customize. Arguments: target_extras: Regular expression to always add, matching any target. executable_target_pattern: Regular expression limiting executable targets. spec: Specifications for target. """ target_name = spec.get('target_name') # Always include targets matching target_extras. if target_extras is not None and re.search(target_extras, target_name): return True # Otherwise just show executable targets. if spec.get('type', '') == 'executable' and \ spec.get('product_extension', '') != 'bundle': # If there is a filter and the target does not match, exclude the target. if executable_target_pattern is not None: if not re.search(executable_target_pattern, target_name): return False return True return False def CreateWrapper(target_list, target_dicts, data, params): """Initialize targets for the ninja wrapper. This sets up the necessary variables in the targets to generate Xcode projects that use ninja as an external builder. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. data: Dict of flattened build files keyed on gyp path. params: Dict of global options for gyp. """ orig_gyp = params['build_files'][0] for gyp_name, gyp_dict in data.iteritems(): if gyp_name == orig_gyp: depth = gyp_dict['_DEPTH'] # Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE # and prepend .ninja before the .gyp extension. 
generator_flags = params.get('generator_flags', {}) main_gyp = generator_flags.get('xcode_ninja_main_gyp', None) if main_gyp is None: (build_file_root, build_file_ext) = os.path.splitext(orig_gyp) main_gyp = build_file_root + ".ninja" + build_file_ext # Create new |target_list|, |target_dicts| and |data| data structures. new_target_list = [] new_target_dicts = {} new_data = {} # Set base keys needed for |data|. new_data[main_gyp] = {} new_data[main_gyp]['included_files'] = [] new_data[main_gyp]['targets'] = [] new_data[main_gyp]['xcode_settings'] = \ data[orig_gyp].get('xcode_settings', {}) # Normally the xcode-ninja generator includes only valid executable targets. # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to # executable targets that match the pattern. (Default all) executable_target_pattern = \ generator_flags.get('xcode_ninja_executable_target_pattern', None) # For including other non-executable targets, add the matching target name # to the |xcode_ninja_target_pattern| regular expression. (Default none) target_extras = generator_flags.get('xcode_ninja_target_pattern', None) for old_qualified_target in target_list: spec = target_dicts[old_qualified_target] if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec): # Add to new_target_list. target_name = spec.get('target_name') new_target_name = '%s:%s#target' % (main_gyp, target_name) new_target_list.append(new_target_name) # Add to new_target_dicts. new_target_dicts[new_target_name] = _TargetFromSpec(spec, params) # Add to new_data. for old_target in data[old_qualified_target.split(':')[0]]['targets']: if old_target['target_name'] == target_name: new_data_target = {} new_data_target['target_name'] = old_target['target_name'] new_data_target['toolset'] = old_target['toolset'] new_data[main_gyp]['targets'].append(new_data_target) # Create sources target. 
sources_target_name = 'sources_for_indexing' sources_target = _TargetFromSpec( { 'target_name' : sources_target_name, 'toolset': 'target', 'default_configuration': 'Default', 'mac_bundle': '0', 'type': 'executable' }, None) # Tell Xcode to look everywhere for headers. sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } } sources = [] for target, target_dict in target_dicts.iteritems(): base = os.path.dirname(target) files = target_dict.get('sources', []) + \ target_dict.get('mac_bundle_resources', []) for action in target_dict.get('actions', []): files.extend(action.get('inputs', [])) # Remove files starting with $. These are mostly intermediate files for the # build system. files = [ file for file in files if not file.startswith('$')] # Make sources relative to root build file. relative_path = os.path.dirname(main_gyp) sources += [ os.path.relpath(os.path.join(base, file), relative_path) for file in files ] sources_target['sources'] = sorted(set(sources)) # Put sources_to_index in it's own gyp. sources_gyp = \ os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp") fully_qualified_target_name = \ '%s:%s#target' % (sources_gyp, sources_target_name) # Add to new_target_list, new_target_dicts and new_data. new_target_list.append(fully_qualified_target_name) new_target_dicts[fully_qualified_target_name] = sources_target new_data_target = {} new_data_target['target_name'] = sources_target['target_name'] new_data_target['_DEPTH'] = depth new_data_target['toolset'] = "target" new_data[sources_gyp] = {} new_data[sources_gyp]['targets'] = [] new_data[sources_gyp]['included_files'] = [] new_data[sources_gyp]['xcode_settings'] = \ data[orig_gyp].get('xcode_settings', {}) new_data[sources_gyp]['targets'].append(new_data_target) # Write workspace to file. _WriteWorkspace(main_gyp, sources_gyp, params) return (new_target_list, new_target_dicts, new_data)
apache-2.0
openstack/watcher
watcher/conf/clients_auth.py
2
1085
# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi <prudhvi.rao.shedimbi@intel.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import loading as ka_loading WATCHER_CLIENTS_AUTH = 'watcher_clients_auth' def register_opts(conf): ka_loading.register_session_conf_options(conf, WATCHER_CLIENTS_AUTH) ka_loading.register_auth_conf_options(conf, WATCHER_CLIENTS_AUTH) def list_opts(): return [(WATCHER_CLIENTS_AUTH, ka_loading.get_session_conf_options() + ka_loading.get_auth_common_conf_options())]
apache-2.0
SlimRoms/android_external_chromium_org
chrome/common/extensions/docs/server2/template_data_source_test.py
15
2395
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import unittest from extensions_paths import SERVER2 from server_instance import ServerInstance from template_data_source import TemplateDataSource from test_util import DisableLogging, ReadFile from third_party.handlebar import Handlebar def _ReadFile(*path): return ReadFile(SERVER2, 'test_data', 'template_data_source', *path) def _CreateTestDataSource(base_dir): '''TemplateDataSource is not instantiated directly, rather, its methods are invoked through a subclass of it, which has as its only data the directory in which TemplateDataSource methods should act on. Thus, we test TemplateDataSource indirectly through the TestDataSource class ''' return TestDataSource(ServerInstance.ForLocal(), '%stest_data/template_data_source/%s/' % (SERVER2, base_dir)) class TestDataSource(TemplateDataSource): '''Provides a subclass we can use to test the TemplateDataSource methods ''' def __init__(self, server_instance, base_dir): type(self)._BASE = base_dir TemplateDataSource.__init__(self, server_instance) class TemplateDataSourceTest(unittest.TestCase): def testSimple(self): test_data_source = _CreateTestDataSource('simple') template_a1 = Handlebar(_ReadFile('simple', 'test1.html')) context = [{}, {'templates': {}}] self.assertEqual( template_a1.Render(*context).text, test_data_source.get('test1').Render(*context).text) template_a2 = Handlebar(_ReadFile('simple', 'test2.html')) self.assertEqual( template_a2.Render(*context).text, test_data_source.get('test2').Render(*context).text) @DisableLogging('warning') def testNotFound(self): test_data_source = _CreateTestDataSource('simple') self.assertEqual(None, test_data_source.get('junk')) @DisableLogging('warning') def testPartials(self): test_data_source = _CreateTestDataSource('partials') context = json.loads(_ReadFile('partials', 
'input.json')) self.assertEqual( _ReadFile('partials', 'test_expected.html'), test_data_source.get('test_tmpl').Render( context, test_data_source).text) if __name__ == '__main__': unittest.main()
bsd-3-clause
glaubitz/fs-uae-debian
launcher/OpenGL/raw/GL/AMD/debug_output.py
9
2233
'''Autogenerated by xml_generate script, do not edit!''' from OpenGL import platform as _p, arrays # Code generation uses this from OpenGL.raw.GL import _types as _cs # End users want this... from OpenGL.raw.GL._types import * from OpenGL.raw.GL import _errors from OpenGL.constant import Constant as _C import ctypes _EXTENSION_NAME = 'GL_AMD_debug_output' def _f( function ): return _p.createFunction( function,_p.PLATFORM.GL,'GL_AMD_debug_output',error_checker=_errors._error_checker) GL_DEBUG_CATEGORY_API_ERROR_AMD=_C('GL_DEBUG_CATEGORY_API_ERROR_AMD',0x9149) GL_DEBUG_CATEGORY_APPLICATION_AMD=_C('GL_DEBUG_CATEGORY_APPLICATION_AMD',0x914F) GL_DEBUG_CATEGORY_DEPRECATION_AMD=_C('GL_DEBUG_CATEGORY_DEPRECATION_AMD',0x914B) GL_DEBUG_CATEGORY_OTHER_AMD=_C('GL_DEBUG_CATEGORY_OTHER_AMD',0x9150) GL_DEBUG_CATEGORY_PERFORMANCE_AMD=_C('GL_DEBUG_CATEGORY_PERFORMANCE_AMD',0x914D) GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD=_C('GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD',0x914E) GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD=_C('GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD',0x914C) GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD=_C('GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD',0x914A) GL_DEBUG_LOGGED_MESSAGES_AMD=_C('GL_DEBUG_LOGGED_MESSAGES_AMD',0x9145) GL_DEBUG_SEVERITY_HIGH_AMD=_C('GL_DEBUG_SEVERITY_HIGH_AMD',0x9146) GL_DEBUG_SEVERITY_LOW_AMD=_C('GL_DEBUG_SEVERITY_LOW_AMD',0x9148) GL_DEBUG_SEVERITY_MEDIUM_AMD=_C('GL_DEBUG_SEVERITY_MEDIUM_AMD',0x9147) GL_MAX_DEBUG_LOGGED_MESSAGES_AMD=_C('GL_MAX_DEBUG_LOGGED_MESSAGES_AMD',0x9144) GL_MAX_DEBUG_MESSAGE_LENGTH_AMD=_C('GL_MAX_DEBUG_MESSAGE_LENGTH_AMD',0x9143) @_f @_p.types(None,_cs.GLDEBUGPROCAMD,ctypes.c_void_p) def glDebugMessageCallbackAMD(callback,userParam):pass @_f @_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray,_cs.GLboolean) def glDebugMessageEnableAMD(category,severity,count,ids,enabled):pass @_f @_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLcharArray) def glDebugMessageInsertAMD(category,severity,id,length,buf):pass @_f 
@_p.types(_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,arrays.GLuintArray,arrays.GLuintArray,arrays.GLsizeiArray,arrays.GLcharArray) def glGetDebugMessageLogAMD(count,bufsize,categories,severities,ids,lengths,message):pass
gpl-2.0
elainenaomi/sciwonc-dataflow-examples
sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/python2.7/dist-packages/Pegasus/netlogger/configobj.py
1
88638
# configobj.py # A config file reader/writer that supports nested sections in config files. # Copyright (C) 2005-2008 Michael Foord, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # nico AT tekNico DOT net # ConfigObj 4 # http://www.voidspace.org.uk/python/configobj.html # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. from __future__ import generators import sys INTP_VER = sys.version_info[:2] if INTP_VER < (2, 2): raise RuntimeError("Python v.2.2 or later needed") import os, re compiler = None try: import compiler except ImportError: # for IronPython pass from types import StringTypes from warnings import warn try: from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE except ImportError: # Python 2.2 does not have these # UTF-8 BOM_UTF8 = '\xef\xbb\xbf' # UTF-16, little endian BOM_UTF16_LE = '\xff\xfe' # UTF-16, big endian BOM_UTF16_BE = '\xfe\xff' if sys.byteorder == 'little': # UTF-16, native endianness BOM_UTF16 = BOM_UTF16_LE else: # UTF-16, native endianness BOM_UTF16 = BOM_UTF16_BE # A dictionary mapping BOM to # the encoding to decode with, and what to set the # encoding attribute to. BOMS = { BOM_UTF8: ('utf_8', None), BOM_UTF16_BE: ('utf16_be', 'utf_16'), BOM_UTF16_LE: ('utf16_le', 'utf_16'), BOM_UTF16: ('utf_16', 'utf_16'), } # All legal variants of the BOM codecs. # TODO: the list of aliases is not meant to be exhaustive, is there a # better way ? 
BOM_LIST = { 'utf_16': 'utf_16', 'u16': 'utf_16', 'utf16': 'utf_16', 'utf-16': 'utf_16', 'utf16_be': 'utf16_be', 'utf_16_be': 'utf16_be', 'utf-16be': 'utf16_be', 'utf16_le': 'utf16_le', 'utf_16_le': 'utf16_le', 'utf-16le': 'utf16_le', 'utf_8': 'utf_8', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf-8': 'utf_8', } # Map of encodings to the BOM to write. BOM_SET = { 'utf_8': BOM_UTF8, 'utf_16': BOM_UTF16, 'utf16_be': BOM_UTF16_BE, 'utf16_le': BOM_UTF16_LE, None: BOM_UTF8 } def match_utf8(encoding): return BOM_LIST.get(encoding.lower()) == 'utf_8' # Quote strings used for writing values squot = "'%s'" dquot = '"%s"' noquot = "%s" wspace_plus = ' \r\t\n\v\t\'"' tsquot = '"""%s"""' tdquot = "'''%s'''" try: enumerate except NameError: def enumerate(obj): """enumerate for Python 2.2.""" i = -1 for item in obj: i += 1 yield i, item try: True, False except NameError: True, False = 1, 0 __version__ = '4.5.2' __revision__ = '$Id: configobj.py 23978 2009-10-21 21:43:02Z ksb $' __docformat__ = "restructuredtext en" __all__ = ( '__version__', 'DEFAULT_INDENT_TYPE', 'DEFAULT_INTERPOLATION', 'ConfigObjError', 'NestingError', 'ParseError', 'DuplicateError', 'ConfigspecError', 'ConfigObj', 'SimpleVal', 'InterpolationError', 'InterpolationLoopError', 'MissingInterpolationOption', 'RepeatSectionError', 'ReloadError', 'UnreprError', 'UnknownType', '__docformat__', 'flatten_errors', ) DEFAULT_INTERPOLATION = 'configparser' DEFAULT_INDENT_TYPE = ' ' MAX_INTERPOL_DEPTH = 10 OPTION_DEFAULTS = { 'interpolation': True, 'raise_errors': False, 'list_values': True, 'create_empty': False, 'file_error': False, 'configspec': None, 'stringify': True, # option may be set to one of ('', ' ', '\t') 'indent_type': None, 'encoding': None, 'default_encoding': None, 'unrepr': False, 'write_empty_values': False, } def getObj(s): s = "a=" + s if compiler is None: raise ImportError('compiler module not available') p = compiler.parse(s) return p.getChildren()[1].getChildren()[0].getChildren()[1] class 
UnknownType(Exception): pass class Builder(object): def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): if o.name == 'None': return None if o.name == 'True': return True if o.name == 'False': return False # An undefined Name raise UnknownType('Undefined Name') def build_Add(self, o): real, imag = map(self.build_Const, o.getChildren()) try: real = float(real) except TypeError: raise UnknownType('Add') if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real+imag def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_UnarySub(self, o): return -self.build_Const(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) _builder = Builder() def unrepr(s): if not s: return s return _builder.build(getObj(s)) class ConfigObjError(SyntaxError): """ This is the base class for all errors that ConfigObj raises. It is a subclass of SyntaxError. """ def __init__(self, msg='', line_number=None, line=''): self.line = line self.line_number = line_number self.msg = msg SyntaxError.__init__(self, msg) class NestingError(ConfigObjError): """ This error indicates a level of nesting that doesn't match. """ class ParseError(ConfigObjError): """ This error indicates that a line is badly written. It is neither a valid ``key = value`` line, nor a valid section marker line. """ class ReloadError(IOError): """ A 'reload' operation failed. This exception is a subclass of ``IOError``. 
""" def __init__(self): IOError.__init__(self, 'reload failed, filename is not set.') class DuplicateError(ConfigObjError): """ The keyword or section specified already exists. """ class ConfigspecError(ConfigObjError): """ An error occured whilst parsing a configspec. """ class InterpolationError(ConfigObjError): """Base class for the two interpolation errors.""" class InterpolationLoopError(InterpolationError): """Maximum interpolation depth exceeded in string interpolation.""" def __init__(self, option): InterpolationError.__init__( self, 'interpolation loop detected in value "%s".' % option) class RepeatSectionError(ConfigObjError): """ This error indicates additional sections in a section with a ``__many__`` (repeated) section. """ class MissingInterpolationOption(InterpolationError): """A value specified for interpolation was missing.""" def __init__(self, option): InterpolationError.__init__( self, 'missing option "%s" in interpolation.' % option) class UnreprError(ConfigObjError): """An error parsing in unrepr mode.""" class InterpolationEngine(object): """ A helper class to help perform string interpolation. This class is an abstract base class; its descendants perform the actual work. """ # compiled regexp to use in self.interpolate() _KEYCRE = re.compile(r"%\(([^)]*)\)s") def __init__(self, section): # the Section instance that "owns" this engine self.section = section def interpolate(self, key, value): def recursive_interpolate(key, value, section, backtrail): """The function that does the actual work. ``value``: the string we're trying to interpolate. ``section``: the section in which that string was found ``backtrail``: a dict to keep track of where we've been, to detect and prevent infinite recursion loops This is similar to a depth-first-search algorithm. """ # Have we been here already? 
if backtrail.has_key((key, section.name)): # Yes - infinite loop detected raise InterpolationLoopError(key) # Place a marker on our backtrail so we won't come back here again backtrail[(key, section.name)] = 1 # Now start the actual work match = self._KEYCRE.search(value) while match: # The actual parsing of the match is implementation-dependent, # so delegate to our helper function k, v, s = self._parse_match(match) if k is None: # That's the signal that no further interpolation is needed replacement = v else: # Further interpolation may be needed to obtain final value replacement = recursive_interpolate(k, v, s, backtrail) # Replace the matched string with its final value start, end = match.span() value = ''.join((value[:start], replacement, value[end:])) new_search_start = start + len(replacement) # Pick up the next interpolation key, if any, for next time # through the while loop match = self._KEYCRE.search(value, new_search_start) # Now safe to come back here again; remove marker from backtrail del backtrail[(key, section.name)] return value # Back in interpolate(), all we have to do is kick off the recursive # function with appropriate starting values value = recursive_interpolate(key, value, self.section, {}) return value def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! 
save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None: break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None: break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section def _parse_match(self, match): """Implementation-dependent helper function. Will be passed a match object corresponding to the interpolation key we just found (e.g., "%(foo)s" or "$foo"). Should look up that key in the appropriate config file section (using the ``_fetch()`` helper function) and return a 3-tuple: (key, value, section) ``key`` is the name of the key we're looking for ``value`` is the value found for that key ``section`` is a reference to the section where it was found ``key`` and ``section`` should be None if no further interpolation should be performed on the resulting value (e.g., if we interpolated "$$" and returned "$"). 
""" raise NotImplementedError() class ConfigParserInterpolation(InterpolationEngine): """Behaves like ConfigParser.""" _KEYCRE = re.compile(r"%\(([^)]*)\)s") def _parse_match(self, match): key = match.group(1) value, section = self._fetch(key) return key, value, section class TemplateInterpolation(InterpolationEngine): """Behaves like string.Template.""" _delimiter = '$' _KEYCRE = re.compile(r""" \$(?: (?P<escaped>\$) | # Two $ signs (?P<named>[_a-z][_a-z0-9]*) | # $name format {(?P<braced>[^}]*)} # ${name} format ) """, re.IGNORECASE | re.VERBOSE) def _parse_match(self, match): # Valid name (in or out of braces): fetch value from section key = match.group('named') or match.group('braced') if key is not None: value, section = self._fetch(key) return key, value, section # Escaped delimiter (e.g., $$): return single delimiter if match.group('escaped') is not None: # Return None for key and section to indicate it's time to stop return None, self._delimiter, None # Anything else: ignore completely, just return it unchanged return None, match.group(), None interpolation_engines = { 'configparser': ConfigParserInterpolation, 'template': TemplateInterpolation, } class Section(dict): """ A dictionary-like object that represents a section in a config file. It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. Iteration follows the order: scalars, then sections. 
""" def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above * depth is the depth level of this section * main is the main ConfigObj * indict is a dictionary to initialise the section with """ if indict is None: indict = {} dict.__init__(self) # used for nesting level *and* interpolation self.parent = parent # used for the interpolation attribute self.main = main # level of nesting depth of this Section self.depth = depth # purely for information self.name = name # self._initialise() # we do this explicitly so that __setitem__ is used properly # (rather than just passing to ``dict.__init__``) for entry, value in indict.iteritems(): self[entry] = value def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] # the sequence of sections in this Section self.sections = [] # for comments :-) self.comments = {} self.inline_comments = {} # for the configspec self.configspec = {} self._order = [] self._configspec_comments = {} self._configspec_inline_comments = {} self._cs_section_comments = {} self._cs_section_inline_comments = {} # for defaults self.defaults = [] self.default_values = {} def _interpolate(self, key, value): try: # do we already have an interpolation engine? engine = self._interpolation_engine except AttributeError: # not yet: first time running _interpolate(), so pick the engine name = self.main.interpolation if name is True: # note that "if name:" would be incorrect here # backwards-compatibility: interpolation=True means use default name = DEFAULT_INTERPOLATION name = name.lower() # so that "Template", "template", etc. 
all work class_ = interpolation_engines.get(name, None) if class_ is None: # invalid value for self.main.interpolation self.main.interpolation = False return value else: # save reference to engine so we don't have to do this again engine = self._interpolation_engine = class_(self) # let the engine do the actual work return engine.interpolate(key, value) def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) if self.main.interpolation and isinstance(val, StringTypes): return self._interpolate(key, val) return val def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. `unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, StringTypes): raise ValueError('The key "%s" is not a string.' % key) # add the comment if not self.comments.has_key(key): self.comments[key] = [] self.inline_comments[key] = '' # remove the entry from defaults if key in self.defaults: self.defaults.remove(key) # if isinstance(value, Section): if not self.has_key(key): self.sections.append(key) dict.__setitem__(self, key, value) elif isinstance(value, dict) and not unrepr: # First create the new depth level, # then create the section if not self.has_key(key): self.sections.append(key) new_depth = self.depth + 1 dict.__setitem__( self, key, Section( self, new_depth, self.main, indict=value, name=key)) else: if not self.has_key(key): self.scalars.append(key) if not self.main.stringify: if isinstance(value, StringTypes): pass elif isinstance(value, (list, tuple)): for entry in value: if not isinstance(entry, StringTypes): raise TypeError('Value is not a string "%s".' % entry) else: raise TypeError('Value is not a string "%s".' 
% value) dict.__setitem__(self, key, value) def __delitem__(self, key): """Remove items from the sequence when deleting.""" dict. __delitem__(self, key) if key in self.scalars: self.scalars.remove(key) else: self.sections.remove(key) del self.comments[key] del self.inline_comments[key] def get(self, key, default=None): """A version of ``get`` that doesn't bypass string interpolation.""" try: return self[key] except KeyError: return default def update(self, indict): """ A version of update that uses our ``__setitem__``. """ for entry in indict: self[entry] = indict[entry] def pop(self, key, *args): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised' """ val = dict.pop(self, key, *args) if key in self.scalars: del self.comments[key] del self.inline_comments[key] self.scalars.remove(key) elif key in self.sections: del self.comments[key] del self.inline_comments[key] self.sections.remove(key) if self.main.interpolation and isinstance(val, StringTypes): return self._interpolate(key, val) return val def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. 
        Leaves other attributes alone :
            depth/main/parent are not affected
        """
        dict.clear(self)
        self.scalars = []
        self.sections = []
        self.comments = {}
        self.inline_comments = {}
        self.configspec = {}


    def setdefault(self, key, default=None):
        """A version of setdefault that sets sequence if appropriate."""
        try:
            return self[key]
        except KeyError:
            # goes through our __setitem__, so ordering/comments are updated
            self[key] = default
            return self[key]


    def items(self):
        """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
        return zip((self.scalars + self.sections), self.values())


    def keys(self):
        """D.keys() -> list of D's keys"""
        # member order: scalars first, then sections
        return (self.scalars + self.sections)


    def values(self):
        """D.values() -> list of D's values"""
        # uses self[key], so string values are interpolated
        return [self[key] for key in (self.scalars + self.sections)]


    def iteritems(self):
        """D.iteritems() -> an iterator over the (key, value) items of D"""
        return iter(self.items())


    def iterkeys(self):
        """D.iterkeys() -> an iterator over the keys of D"""
        return iter((self.scalars + self.sections))

    __iter__ = iterkeys


    def itervalues(self):
        """D.itervalues() -> an iterator over the values of D"""
        return iter(self.values())


    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key])))
            for key in (self.scalars + self.sections)])

    __str__ = __repr__
    __str__.__doc__ = "x.__str__() <==> str(x)"


    # Extra methods - not in a normal dictionary

    def dict(self):
        """
        Return a deepcopy of self as a dictionary.
        
        All members that are ``Section`` instances are recursively turned to
        ordinary dictionaries - by calling their ``dict`` method.
        >>> n = a.dict()
        >>> n == a
        1
        >>> n is a
        0
        """
        newdict = {}
        for entry in self:
            this_entry = self[entry]
            if isinstance(this_entry, Section):
                # recurse: subsections become plain dicts
                this_entry = this_entry.dict()
            elif isinstance(this_entry, list):
                # create a copy rather than a reference
                this_entry = list(this_entry)
            elif isinstance(this_entry, tuple):
                # create a copy rather than a reference
                this_entry = tuple(this_entry)
            newdict[entry] = this_entry
        return newdict


    def merge(self, indict):
        """
        A recursive update - useful for merging config files.
        
        >>> a = '''[section1]
        ...     option1 = True
        ...     [[subsection]]
        ...     more_options = False
        ...     # end of file'''.splitlines()
        >>> b = '''# File is user.ini
        ...     [section1]
        ...     option1 = False
        ...     # end of file'''.splitlines()
        >>> c1 = ConfigObj(b)
        >>> c2 = ConfigObj(a)
        >>> c2.merge(c1)
        >>> c2
        {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
        """
        for key, val in indict.items():
            # only descend when both sides are dict-like; otherwise the
            # incoming value simply overwrites the existing one
            if (key in self and isinstance(self[key], dict) and
                                isinstance(val, dict)):
                self[key].merge(val)
            else:
                self[key] = val


    def rename(self, oldkey, newkey):
        """
        Change a keyname to another, without changing position in sequence.
        
        Implemented so that transformations can be made on keys,
        as well as on values. (used by encode and decode)
        
        Also renames comments.
        """
        if oldkey in self.scalars:
            the_list = self.scalars
        elif oldkey in self.sections:
            the_list = self.sections
        else:
            raise KeyError('Key "%s" not found.' % oldkey)
        pos = the_list.index(oldkey)
        # use the raw dict methods so comment bookkeeping (handled
        # explicitly below) isn't disturbed by our __setitem__/__delitem__
        val = self[oldkey]
        dict.__delitem__(self, oldkey)
        dict.__setitem__(self, newkey, val)
        the_list.remove(oldkey)
        the_list.insert(pos, newkey)
        comm = self.comments[oldkey]
        inline_comment = self.inline_comments[oldkey]
        del self.comments[oldkey]
        del self.inline_comments[oldkey]
        self.comments[newkey] = comm
        self.inline_comments[newkey] = inline_comment


    def walk(self, function, raise_errors=True,
            call_on_sections=False, **keywargs):
        """
        Walk every member and call a function on the keyword and value.
        Return a dictionary of the return values
        
        If the function raises an exception, raise the error
        unless ``raise_errors=False``, in which case set the return
        value to ``False``.
        
        Any unrecognised keyword arguments you pass to walk, will be passed on
        to the function you pass in.
        
        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then recurses into its members. This means your function must be
        able to handle strings, dictionaries and lists.
        
        This allows you to change the key of subsections as well as for
        ordinary members. The return value when called on the whole
        subsection has to be discarded.
        
        See the encode and decode methods for examples, including functions.
        
        .. caution::
        
            You can use ``walk`` to transform the names of members of a section
            but you mustn't add or delete members.
        
        >>> config = '''[XXXXsection]
        ... XXXXkey = XXXXvalue'''.splitlines()
        >>> cfg = ConfigObj(config)
        >>> cfg
        {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
        >>> def transform(section, key):
        ...     val = section[key]
        ...     newkey = key.replace('XXXX', 'CLIENT1')
        ...     section.rename(key, newkey)
        ...     if isinstance(val, (tuple, list, dict)):
        ...         pass
        ...     else:
        ...         val = val.replace('XXXX', 'CLIENT1')
        ...         section[newkey] = val
        >>> cfg.walk(transform, call_on_sections=True)
        {'CLIENT1section': {'CLIENT1key': None}}
        >>> cfg
        {'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}
        """
        out = {}
        # scalars first
        # NOTE: index-based loops are deliberate - ``function`` may rename
        # the current key, so the name must be re-read from the list after
        # each call rather than trusted from before it.
        for i in range(len(self.scalars)):
            entry = self.scalars[i]
            try:
                val = function(self, entry, **keywargs)
                # bound again in case name has changed
                entry = self.scalars[i]
                out[entry] = val
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.scalars[i]
                    out[entry] = False
        # then sections
        for i in range(len(self.sections)):
            entry = self.sections[i]
            if call_on_sections:
                try:
                    function(self, entry, **keywargs)
                except Exception:
                    if raise_errors:
                        raise
                    else:
                        entry = self.sections[i]
                        out[entry] = False
                # bound again in case name has changed
                entry = self.sections[i]
                # previous result is discarded
            out[entry] = self[entry].walk(
                function,
                raise_errors=raise_errors,
                call_on_sections=call_on_sections,
                **keywargs)
        return out


    def decode(self, encoding):
        """
        Decode all strings and values to unicode, using the specified encoding.
        
        Works with subsections and list values.
        
        Uses the ``walk`` method.
        
        Testing ``encode`` and ``decode``.
        >>> m = ConfigObj(a)
        >>> m.decode('ascii')
        >>> def testuni(val):
        ...     for entry in val:
        ...         if not isinstance(entry, unicode):
        ...             print >> sys.stderr, type(entry)
        ...             raise AssertionError, 'decode failed.'
        ...         if isinstance(val[entry], dict):
        ...             testuni(val[entry])
        ...         elif not isinstance(val[entry], unicode):
        ...             raise AssertionError, 'decode failed.'
        >>> testuni(m)
        >>> m.encode('ascii')
        >>> a == m
        1
        """
        warn('use of ``decode`` is deprecated.', DeprecationWarning)
        # nested helper deliberately shadows the method name; ``walk``
        # calls it for every member (and, via call_on_sections, every
        # section name) so both keys and values get decoded
        def decode(section, key, encoding=encoding, warn=True):
            """ """
            # NOTE(review): the ``warn`` parameter appears unused here
            val = section[key]
            if isinstance(val, (list, tuple)):
                newval = []
                for entry in val:
                    newval.append(entry.decode(encoding))
            elif isinstance(val, dict):
                newval = val
            else:
                newval = val.decode(encoding)
            newkey = key.decode(encoding)
            section.rename(key, newkey)
            section[newkey] = newval
        # using ``call_on_sections`` allows us to modify section names
        self.walk(decode, call_on_sections=True)


    def encode(self, encoding):
        """
        Encode all strings and values from unicode,
        using the specified encoding.
        
        Works with subsections and list values.
        Uses the ``walk`` method.
        """
        warn('use of ``encode`` is deprecated.', DeprecationWarning)
        # mirror image of ``decode`` above: encodes keys and values in place
        def encode(section, key, encoding=encoding):
            """ """
            val = section[key]
            if isinstance(val, (list, tuple)):
                newval = []
                for entry in val:
                    newval.append(entry.encode(encoding))
            elif isinstance(val, dict):
                newval = val
            else:
                newval = val.encode(encoding)
            newkey = key.encode(encoding)
            section.rename(key, newkey)
            section[newkey] = newval
        self.walk(encode, call_on_sections=True)


    def istrue(self, key):
        """A deprecated version of ``as_bool``."""
        warn('use of ``istrue`` is deprecated. Use ``as_bool`` method '
             'instead.', DeprecationWarning)
        return self.as_bool(key)


    def as_bool(self, key):
        """
        Accepts a key as input. The corresponding value must be a string or
        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
        retain compatibility with Python 2.2.
        
        If the string is one of  ``True``, ``On``, ``Yes``, or ``1`` it returns 
        ``True``.
        
        If the string is one of  ``False``, ``Off``, ``No``, or ``0`` it returns 
        ``False``.
        
        ``as_bool`` is not case sensitive.
        
        Any other input will raise a ``ValueError``.
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_bool('a')
        Traceback (most recent call last):
        ValueError: Value "fish" is neither True nor False
        >>> a['b'] = 'True'
        >>> a.as_bool('b')
        1
        >>> a['b'] = 'off'
        >>> a.as_bool('b')
        0
        """
        val = self[key]
        if val is True:
            return True
        elif val is False:
            return False
        else:
            try:
                if not isinstance(val, StringTypes):
                    # TODO: Why do we raise a KeyError here?
                    raise KeyError()
                else:
                    # _bools maps 'true'/'yes'/'on'/'1' etc. to booleans;
                    # unknown strings raise KeyError, converted below
                    return self.main._bools[val.lower()]
            except KeyError:
                raise ValueError('Value "%s" is neither True nor False' % val)


    def as_int(self, key):
        """
        A convenience method which coerces the specified value to an integer.
        
        If the value is an invalid literal for ``int``, a ``ValueError`` will
        be raised.
        
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_int('a')
        Traceback (most recent call last):
        ValueError: invalid literal for int(): fish
        >>> a['b'] = '1'
        >>> a.as_int('b')
        1
        >>> a['b'] = '3.2'
        >>> a.as_int('b')
        Traceback (most recent call last):
        ValueError: invalid literal for int(): 3.2
        """
        return int(self[key])


    def as_float(self, key):
        """
        A convenience method which coerces the specified value to a float.
        
        If the value is an invalid literal for ``float``, a ``ValueError`` will
        be raised.
        
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_float('a')
        Traceback (most recent call last):
        ValueError: invalid literal for float(): fish
        >>> a['b'] = '1'
        >>> a.as_float('b')
        1.0
        >>> a['b'] = '3.2'
        >>> a.as_float('b')
        3.2000000000000002
        """
        return float(self[key])


    def restore_default(self, key):
        """
        Restore (and return) default value for the specified key.
        
        This method will only work for a ConfigObj that was created
        with a configspec and has been validated.
        
        If there is no default value for this key, ``KeyError`` is raised.
        """
        default = self.default_values[key]
        # bypass our __setitem__ so the key isn't removed from ``defaults``
        # and comment bookkeeping is left untouched
        dict.__setitem__(self, key, default)
        if key not in self.defaults:
            self.defaults.append(key)
        return default


    def restore_defaults(self):
        """
        Recursively restore default values to all members
        that have them.
This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults() class ConfigObj(Section): """An object to read, create, and write config files.""" _keyword = re.compile(r'''^ # line start (\s*) # indentation ( # keyword (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"=].*?) # no quotes ) \s*=\s* # divider (.*) # value (including list values and comments) $ # line end ''', re.VERBOSE) _sectionmarker = re.compile(r'''^ (\s*) # 1: indentation ((?:\[\s*)+) # 2: section marker open ( # 3: section name open (?:"\s*\S.*?\s*")| # at least one non-space with double quotes (?:'\s*\S.*?\s*')| # at least one non-space with single quotes (?:[^'"\s].*?) # at least one non-space unquoted ) # section name close ((?:\s*\])+) # 4: section marker close \s*(\#.*)? # 5: optional comment $''', re.VERBOSE) # this regexp pulls list values out as a single string # or single values and comments # FIXME: this regex adds a '' to the end of comma terminated lists # workaround in ``_handle_value`` _valueexp = re.compile(r'''^ (?: (?: ( (?: (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#][^,\#]*?) # unquoted ) \s*,\s* # comma )* # match all list items ending in a comma (if any) ) ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#\s][^,]*?)| # unquoted (?:(?<!,)) # Empty value )? # last item in a list - or string value )| (,) # alternatively a single comma - empty list ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # use findall to get the members of a list value _listvalueexp = re.compile(r''' ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#].*?) 
# unquoted ) \s*,\s* # comma ''', re.VERBOSE) # this regexp is used for the value # when lists are switched off _nolistvalue = re.compile(r'''^ ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"\#].*?)| # unquoted (?:) # Empty value ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # regexes for finding triple quoted values on one line _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$") _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$') _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$") _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$') _triple_quote = { "'''": (_single_line_single, _multi_line_single), '"""': (_single_line_double, _multi_line_double), } # Used by the ``istrue`` Section method _bools = { 'yes': True, 'no': False, 'on': True, 'off': False, '1': True, '0': False, 'true': True, 'false': False, } def __init__(self, infile=None, options=None, **kwargs): """ Parse a config file or create a config file object. ``ConfigObj(infile=None, options=None, **kwargs)`` """ # init the superclass Section.__init__(self, self, 0, self) if infile is None: infile = [] if options is None: options = {} else: options = dict(options) # keyword arguments take precedence over an options dictionary options.update(kwargs) defaults = OPTION_DEFAULTS.copy() # TODO: check the values too. for entry in options: if entry not in defaults: raise TypeError('Unrecognised option "%s".' % entry) # Add any explicit options to the defaults defaults.update(options) self._initialise(defaults) configspec = defaults['configspec'] self._original_configspec = configspec self._load(infile, configspec) def _load(self, infile, configspec): if isinstance(infile, StringTypes): self.filename = infile if os.path.isfile(infile): h = open(infile, 'rb') infile = h.read() or [] h.close() elif self.file_error: # raise an error if the file doesn't exist raise IOError('Config file not found: "%s".' 
% self.filename) else: # file doesn't already exist if self.create_empty: # this is a good test that the filename specified # isn't impossible - like on a non-existent device h = open(infile, 'w') h.write('') h.close() infile = [] elif isinstance(infile, (list, tuple)): infile = list(infile) elif isinstance(infile, dict): # initialise self # the Section class handles creating subsections if isinstance(infile, ConfigObj): # get a copy of our ConfigObj infile = infile.dict() for entry in infile: self[entry] = infile[entry] del self._errors if configspec is not None: self._handle_configspec(configspec) else: self.configspec = None return elif hasattr(infile, 'read'): # This supports file like objects infile = infile.read() or [] # needs splitting into lines - but needs doing *after* decoding # in case it's not an 8 bit encoding else: raise TypeError('infile must be a filename, file like object, or list of lines.') if infile: # don't do it for the empty ConfigObj infile = self._handle_bom(infile) # infile is now *always* a list # # Set the newlines attribute (first line ending it finds) # and strip trailing '\n' or '\r' from lines for line in infile: if (not line) or (line[-1] not in ('\r', '\n', '\r\n')): continue for end in ('\r\n', '\n', '\r'): if line.endswith(end): self.newlines = end break break infile = [line.rstrip('\r\n') for line in infile] self._parse(infile) # if we had any errors, now is the time to raise them if self._errors: info = "at line %s." 
% self._errors[0].line_number if len(self._errors) > 1: msg = "Parsing failed with several errors.\nFirst error %s" % info error = ConfigObjError(msg) else: error = self._errors[0] # set the errors attribute; it's a list of tuples: # (error_type, message, line_number) error.errors = self._errors # set the config attribute error.config = self raise error # delete private attributes del self._errors if configspec is None: self.configspec = None else: self._handle_configspec(configspec) def _initialise(self, options=None): if options is None: options = OPTION_DEFAULTS # initialise a few variables self.filename = None self._errors = [] self.raise_errors = options['raise_errors'] self.interpolation = options['interpolation'] self.list_values = options['list_values'] self.create_empty = options['create_empty'] self.file_error = options['file_error'] self.stringify = options['stringify'] self.indent_type = options['indent_type'] self.encoding = options['encoding'] self.default_encoding = options['default_encoding'] self.BOM = False self.newlines = None self.write_empty_values = options['write_empty_values'] self.unrepr = options['unrepr'] self.initial_comment = [] self.final_comment = [] self.configspec = {} # Clear section attributes as well Section._initialise(self) def __repr__(self): return ('ConfigObj({%s})' % ', '.join([('%s: %s' % (repr(key), repr(self[key]))) for key in (self.scalars + self.sections)])) def _handle_bom(self, infile): """ Handle any BOM, and decode if necessary. If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. NOTE: This method must not be called with an empty ``infile``. 
Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. ``infile`` must always be returned as a list of lines, but may be passed in as a single string. """ if ((self.encoding is not None) and (self.encoding.lower() not in BOM_LIST)): # No need to check for a BOM # the encoding specified doesn't have one # just decode return self._decode(infile, self.encoding) if isinstance(infile, (list, tuple)): line = infile[0] else: line = infile if self.encoding is not None: # encoding explicitly supplied # And it could have an associated BOM # TODO: if encoding is just UTF16 - we ought to check for both # TODO: big endian and little endian versions. enc = BOM_LIST[self.encoding.lower()] if enc == 'utf_16': # For UTF16 we try big endian and little endian for BOM, (encoding, final_encoding) in BOMS.items(): if not final_encoding: # skip UTF8 continue if infile.startswith(BOM): ### BOM discovered ##self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) newline = line[len(BOM):] # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline self.BOM = True return self._decode(infile, self.encoding) # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in BOMS.items(): if not line.startswith(BOM): continue else: # BOM discovered self.encoding = final_encoding if not final_encoding: self.BOM = True # UTF8 # remove BOM newline = line[len(BOM):] if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline # UTF8 - don't decode if isinstance(infile, StringTypes): return infile.splitlines(True) else: return infile # UTF16 - have to decode return self._decode(infile, encoding) # No BOM discovered and no 
encoding specified, just return if isinstance(infile, StringTypes): # infile read from a file will be a single string return infile.splitlines(True) return infile def _a_to_u(self, aString): """Decode ASCII strings to unicode if a self.encoding is specified.""" if self.encoding: return aString.decode('ascii') else: return aString def _decode(self, infile, encoding): """ Decode infile to unicode. Using the specified encoding. if is a string, it also needs converting to a list. """ if isinstance(infile, StringTypes): # can't be unicode # NOTE: Could raise a ``UnicodeDecodeError`` return infile.decode(encoding).splitlines(True) for i, line in enumerate(infile): if not isinstance(line, unicode): # NOTE: The isinstance test here handles mixed lists of unicode/string # NOTE: But the decode will break on any non-string values # NOTE: Or could raise a ``UnicodeDecodeError`` infile[i] = line.decode(encoding) return infile def _decode_element(self, line): """Decode element to unicode if necessary.""" if not self.encoding: return line if isinstance(line, str) and self.default_encoding: return line.decode(self.default_encoding) return line def _str(self, value): """ Used by ``stringify`` within validate, to turn non-string values into strings. """ if not isinstance(value, StringTypes): return str(value) else: return value def _parse(self, infile): """Actually parse the config file.""" temp_list_values = self.list_values if self.unrepr: self.list_values = False comment_list = [] done_start = False this_section = self maxline = len(infile) - 1 cur_index = -1 reset_comment = False while cur_index < maxline: if reset_comment: comment_list = [] cur_index += 1 line = infile[cur_index] sline = line.strip() # do we have anything on the line ? 
if not sline or sline.startswith('#'): reset_comment = False comment_list.append(line) continue if not done_start: # preserve initial comment self.initial_comment = comment_list comment_list = [] done_start = True reset_comment = True # first we check if it's a section marker mat = self._sectionmarker.match(line) if mat is not None: # is a section line (indent, sect_open, sect_name, sect_close, comment) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent cur_depth = sect_open.count('[') if cur_depth != sect_close.count(']'): self._handle_error("Cannot compute the section depth at line %s.", NestingError, infile, cur_index) continue if cur_depth < this_section.depth: # the new section is dropping back to a previous level try: parent = self._match_depth(this_section, cur_depth).parent except SyntaxError: self._handle_error("Cannot compute nesting level at line %s.", NestingError, infile, cur_index) continue elif cur_depth == this_section.depth: # the new section is a sibling of the current section parent = this_section.parent elif cur_depth == this_section.depth + 1: # the new section is a child the current section parent = this_section else: self._handle_error("Section too nested at line %s.", NestingError, infile, cur_index) sect_name = self._unquote(sect_name) if parent.has_key(sect_name): self._handle_error('Duplicate section name at line %s.', DuplicateError, infile, cur_index) continue # create the new section this_section = Section( parent, cur_depth, self, name=sect_name) parent[sect_name] = this_section parent.inline_comments[sect_name] = comment parent.comments[sect_name] = comment_list continue # # it's not a section marker, # so it should be a valid ``key = value`` line mat = self._keyword.match(line) if mat is None: # it neither matched as a keyword # or a section marker self._handle_error( 'Invalid line at line "%s".', ParseError, infile, cur_index) else: # is a keyword value # value will include any inline comment 
(indent, key, value) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent # check for a multiline value if value[:3] in ['"""', "'''"]: try: (value, comment, cur_index) = self._multiline( value, infile, cur_index, maxline) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if type(e) == UnknownType: msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if isinstance(e, UnknownType): msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: # extract comment and lists try: (value, comment) = self._handle_value(value) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue # key = self._unquote(key) if this_section.has_key(key): self._handle_error( 'Duplicate keyword name at line %s.', DuplicateError, infile, cur_index) continue # add the key. # we set unrepr because if we have got this far we will never # be creating a new section this_section.__setitem__(key, value, unrepr=True) this_section.inline_comments[key] = comment this_section.comments[key] = comment_list continue # if self.indent_type is None: # no indentation used, set the type accordingly self.indent_type = '' # preserve the final comment if not self and not self.initial_comment: self.initial_comment = comment_list elif not reset_comment: self.final_comment = comment_list self.list_values = temp_list_values def _match_depth(self, sect, depth): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. 
Return a reference to the right section, or raise a SyntaxError. """ while depth < sect.depth: if sect is sect.parent: # we've reached the top level already raise SyntaxError() sect = sect.parent if sect.depth == depth: return sect # shouldn't get here raise SyntaxError() def _handle_error(self, text, ErrorClass, infile, cur_index): """ Handle an error according to the error settings. Either raise the error or store it. The error will have occured at ``cur_index`` """ line = infile[cur_index] cur_index += 1 message = text % cur_index error = ErrorClass(message, cur_index, line) if self.raise_errors: # raise the error - parsing stops here raise error # store the error # reraise when parsing has finished self._errors.append(error) def _unquote(self, value): """Return an unquoted version of a value""" if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. Don't quote values that don't need it. Recursively quote members of a list and return a comma joined list. Multiline is ``False`` for lists. Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. 
""" if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, StringTypes): if self.stringify: value = str(value) else: raise TypeError('Value "%s" is not a string.' % value) if not value: return '""' no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif '"' in value: quot = squot else: quot = dquot return quot def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError('Value "%s" cannot be safely quoted.' 
% value) if value.find('"""') == -1: quot = tdquot else: quot = tsquot return quot def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ # do we look for lists in values ? if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment) def _multiline(self, value, infile, cur_index, maxline): """Extract the value, where we are in a multiline situation.""" quot = value[:3] newvalue = value[3:] single_line = self._triple_quote[quot][0] multi_line = self._triple_quote[quot][1] mat = single_line.match(value) if mat is not None: retval = list(mat.groups()) retval.append(cur_index) return retval elif newvalue.find(quot) != -1: # somehow the triple quote is missing raise SyntaxError() # while cur_index < maxline: cur_index += 1 newvalue += '\n' line = infile[cur_index] if line.find(quot) 
== -1: newvalue += line else: # end of multiline, process it break else: # we've got to the end of the config, oops... raise SyntaxError() mat = multi_line.match(line) if mat is None: # a badly formed line raise SyntaxError() (value, comment) = mat.groups() return (newvalue + value, comment, cur_index) def _handle_configspec(self, configspec): """Parse the configspec.""" # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. ``list_values=False``) if not isinstance(configspec, ConfigObj): try: configspec = ConfigObj(configspec, raise_errors=True, file_error=True, list_values=False) except ConfigObjError, e: # FIXME: Should these errors have a reference # to the already parsed ConfigObj ? raise ConfigspecError('Parsing configspec failed: %s' % e) except IOError, e: raise IOError('Reading configspec failed: %s' % e) self._set_configspec_value(configspec, self) def _set_configspec_value(self, configspec, section): """Used to recursively set configspec values.""" if '__many__' in configspec.sections: section.configspec['__many__'] = configspec['__many__'] if len(configspec.sections) > 1: # FIXME: can we supply any useful information here ? 
raise RepeatSectionError() if hasattr(configspec, 'initial_comment'): section._configspec_initial_comment = configspec.initial_comment section._configspec_final_comment = configspec.final_comment section._configspec_encoding = configspec.encoding section._configspec_BOM = configspec.BOM section._configspec_newlines = configspec.newlines section._configspec_indent_type = configspec.indent_type for entry in configspec.scalars: section._configspec_comments[entry] = configspec.comments[entry] section._configspec_inline_comments[entry] = configspec.inline_comments[entry] section.configspec[entry] = configspec[entry] section._order.append(entry) for entry in configspec.sections: if entry == '__many__': continue section._cs_section_comments[entry] = configspec.comments[entry] section._cs_section_inline_comments[entry] = configspec.inline_comments[entry] if not section.has_key(entry): section[entry] = {} self._set_configspec_value(configspec[entry], section[entry]) def _handle_repeat(self, section, configspec): """Dynamically assign configspec for repeated section.""" try: section_keys = configspec.sections scalar_keys = configspec.scalars except AttributeError: section_keys = [entry for entry in configspec if isinstance(configspec[entry], dict)] scalar_keys = [entry for entry in configspec if not isinstance(configspec[entry], dict)] if '__many__' in section_keys and len(section_keys) > 1: # FIXME: can we supply any useful information here ? 
raise RepeatSectionError() scalars = {} sections = {} for entry in scalar_keys: val = configspec[entry] scalars[entry] = val for entry in section_keys: val = configspec[entry] if entry == '__many__': scalars[entry] = val continue sections[entry] = val section.configspec = scalars for entry in sections: if not section.has_key(entry): section[entry] = {} self._handle_repeat(section[entry], sections[entry]) def _write_line(self, indent_string, entry, this_entry, comment): """Write an individual line, for the write method""" # NOTE: the calls to self._quote here handles non-StringType values. if not self.unrepr: val = self._decode_element(self._quote(this_entry)) else: val = repr(this_entry) return '%s%s%s%s%s' % (indent_string, self._decode_element(self._quote(entry, multiline=False)), self._a_to_u(' = '), val, self._decode_element(comment)) def _write_marker(self, indent_string, depth, entry, comment): """Write a section marker line""" return '%s%s%s%s%s' % (indent_string, self._a_to_u('[' * depth), self._quote(self._decode_element(entry), multiline=False), self._a_to_u(']' * depth), self._decode_element(comment)) def _handle_comment(self, comment): """Deal with a comment.""" if not comment: return '' start = self.indent_type if not comment.startswith('#'): start += self._a_to_u(' # ') return (start + comment) # Public methods def write(self, outfile=None, section=None): """ Write the current ConfigObj as a file tekNico: FIXME: use StringIO instead of real files >>> filename = a.filename >>> a.filename = 'test.ini' >>> a.write() >>> a.filename = filename >>> a == ConfigObj('test.ini', raise_errors=True) 1 """ if self.indent_type is None: # this can be true if initialised from a dictionary self.indent_type = DEFAULT_INDENT_TYPE out = [] cs = self._a_to_u('#') csp = self._a_to_u('# ') if section is None: int_val = self.interpolation self.interpolation = False section = self for line in self.initial_comment: line = self._decode_element(line) stripped_line = line.strip() 
if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) indent_string = self.indent_type * section.depth for entry in (section.scalars + section.sections): if entry in section.defaults: # don't write out default values continue for comment_line in section.comments[entry]: comment_line = self._decode_element(comment_line.lstrip()) if comment_line and not comment_line.startswith(cs): comment_line = csp + comment_line out.append(indent_string + comment_line) this_entry = section[entry] comment = self._handle_comment(section.inline_comments[entry]) if isinstance(this_entry, dict): # a section out.append(self._write_marker( indent_string, this_entry.depth, entry, comment)) out.extend(self.write(section=this_entry)) else: out.append(self._write_line( indent_string, entry, this_entry, comment)) if section is self: for line in self.final_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) self.interpolation = int_val if section is not self: return out if (self.filename is None) and (outfile is None): # output a list of lines # might need to encode # NOTE: This will *screw* UTF16, each line will start with the BOM if self.encoding: out = [l.encode(self.encoding) for l in out] if (self.BOM and ((self.encoding is None) or (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))): # Add the UTF8 BOM if not out: out.append('') out[0] = BOM_UTF8 + out[0] return out # Turn the list to a string, joined with correct newlines newline = self.newlines or os.linesep output = self._a_to_u(newline).join(out) if self.encoding: output = output.encode(self.encoding) if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): # Add the UTF8 BOM output = BOM_UTF8 + output if not output.endswith(newline): output += newline if outfile is not None: outfile.write(output) else: h = open(self.filename, 'wb') h.write(output) h.close() def validate(self, 
validator, preserve_errors=False, copy=False, section=None): """ Test the ConfigObj against a configspec. It uses the ``validator`` object from *validate.py*. To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. 
""" if section is None: if self.configspec is None: raise ValueError('No configspec supplied.') if preserve_errors: # We do this once to remove a top level dependency on the validate module # Which makes importing configobj faster from validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue section = self # spec_section = section.configspec if copy and hasattr(section, '_configspec_initial_comment'): section.initial_comment = section._configspec_initial_comment section.final_comment = section._configspec_final_comment section.encoding = section._configspec_encoding section.BOM = section._configspec_BOM section.newlines = section._configspec_newlines section.indent_type = section._configspec_indent_type if '__many__' in section.configspec: many = spec_section['__many__'] # dynamically assign the configspecs # for the sections below for entry in section.sections: self._handle_repeat(section[entry], many) # out = {} ret_true = True ret_false = True order = [k for k in section._order if k in spec_section] order += [k for k in spec_section if k not in order] for entry in order: if entry == '__many__': continue if (not entry in section.scalars) or (entry in section.defaults): # missing entries # or entries from defaults missing = True val = None if copy and not entry in section.scalars: # copy comments section.comments[entry] = ( section._configspec_comments.get(entry, [])) section.inline_comments[entry] = ( section._configspec_inline_comments.get(entry, '')) # else: missing = False val = section[entry] try: check = validator.check(spec_section[entry], val, missing=missing ) except validator.baseErrorClass, e: if not preserve_errors or isinstance(e, self._vdtMissingValue): out[entry] = False else: # preserve the error out[entry] = e ret_false = False ret_true = False else: try: section.default_values.pop(entry, None) except AttributeError: # For Python 2.2 compatibility try: del section.default_values[entry] except KeyError: pass if hasattr(validator, 
'get_default_value'): try: section.default_values[entry] = validator.get_default_value(spec_section[entry]) except KeyError: # No default pass ret_false = False out[entry] = True if self.stringify or missing: # if we are doing type conversion # or the value is a supplied default if not self.stringify: if isinstance(check, (list, tuple)): # preserve lists check = [self._str(item) for item in check] elif missing and check is None: # convert the None from a default to a '' check = '' else: check = self._str(check) if (check != val) or missing: section[entry] = check if not copy and missing and entry not in section.defaults: section.defaults.append(entry) # Missing sections will have been created as empty ones when the # configspec was read. for entry in section.sections: # FIXME: this means DEFAULT is not copied in copy mode if section is self and entry == 'DEFAULT': continue if copy: section.comments[entry] = section._cs_section_comments[entry] section.inline_comments[entry] = ( section._cs_section_inline_comments[entry]) check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) out[entry] = check if check is False: ret_true = False elif check is True: ret_false = False else: ret_true = False ret_false = False # if ret_true: return True elif ret_false: return False return out def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. 
""" if not isinstance(self.filename, StringTypes): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec) class SimpleVal(object): """ A simple validator. Can be used to check that all members expected are present. To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ def __init__(self): self.baseErrorClass = ConfigObjError def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member # Check / processing functions for options def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function. Returns a list of keys that failed. Each member of the list is a tuple : :: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. 
If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. >>> import validate >>> vtor = validate.Validator() >>> my_ini = ''' ... option1 = True ... [section1] ... option1 = True ... [section2] ... another_option = Probably ... [section3] ... another_option = True ... [[section3b]] ... value = 3 ... value2 = a ... value3 = 11 ... ''' >>> my_cfg = ''' ... option1 = boolean() ... option2 = boolean() ... option3 = boolean(default=Bad_value) ... [section1] ... option1 = boolean() ... option2 = boolean() ... option3 = boolean(default=Bad_value) ... [section2] ... another_option = boolean() ... [section3] ... another_option = boolean() ... [[section3b]] ... value = integer ... value2 = integer ... value3 = integer(0, 10) ... [[[section3b-sub]]] ... value = string ... [section4] ... another_option = boolean() ... ''' >>> cs = my_cfg.split('\\n') >>> ini = my_ini.split('\\n') >>> cfg = ConfigObj(ini, configspec=cs) >>> res = cfg.validate(vtor, preserve_errors=True) >>> errors = [] >>> for entry in flatten_errors(cfg, res): ... section_list, key, error = entry ... section_list.insert(0, '[root]') ... if key is not None: ... section_list.append(key) ... else: ... section_list.append('[missing]') ... section_string = ', '.join(section_list) ... errors.append((section_string, ' = ', error)) >>> errors.sort() >>> for entry in errors: ... print entry[0], entry[1], (entry[2] or 0) [root], option2 = 0 [root], option3 = the value "Bad_value" is of the wrong type. [root], section1, option2 = 0 [root], section1, option3 = the value "Bad_value" is of the wrong type. [root], section2, another_option = the value "Probably" is of the wrong type. [root], section3, section3b, section3b-sub, [missing] = 0 [root], section3, section3b, value2 = the value "a" is of the wrong type. 
[root], section3, section3b, value3 = the value "11" is too big. [root], section4, [missing] = 0 """ if levels is None: # first time called levels = [] results = [] if res is True: return results if res is False: results.append((levels[:], None, False)) if levels: levels.pop() return results for (key, val) in res.items(): if val == True: continue if isinstance(cfg.get(key), dict): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return results """*A programming language is a medium of expression.* - Paul Graham"""
gpl-3.0
mcepl/youtube-dl
devscripts/make_lazy_extractors.py
14
2840
from __future__ import unicode_literals, print_function

from inspect import getsource
import os
from os.path import dirname as dirn
import sys

print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)

# Make the in-tree youtube_dl package importable when run from the checkout.
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))

lazy_extractors_filename = sys.argv[1]
if os.path.exists(lazy_extractors_filename):
    os.remove(lazy_extractors_filename)

from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor

with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()

module_contents = [
    module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
    'class LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n']

# Template for one generated lazy stub class.
ie_template = '''
class {name}({bases}):
    _VALID_URL = {valid_url!r}
    _module = '{module}'
'''

# Extra template appended for search extractors, which build their URL pattern
# dynamically via _make_valid_url().
make_valid_template = '''
    @classmethod
    def _make_valid_url(cls):
        return {valid_url!r}
'''


def get_base_name(base):
    """Map a real extractor base class to the name of its lazy stand-in."""
    if base is InfoExtractor:
        return 'LazyLoadExtractor'
    elif base is SearchInfoExtractor:
        return 'LazyLoadSearchExtractor'
    else:
        return base.__name__


def build_lazy_ie(ie, name):
    """Return the source code of the lazy stub class for extractor *ie*.

    The stub carries only _VALID_URL and the originating module name, plus
    the extractor's own suitable()/_make_valid_url() if it overrides them.
    """
    valid_url = getattr(ie, '_VALID_URL', None)
    s = ie_template.format(
        name=name,
        bases=', '.join(map(get_base_name, ie.__bases__)),
        valid_url=valid_url,
        module=ie.__module__)
    if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
        s += '\n' + getsource(ie.suitable)
    if hasattr(ie, '_make_valid_url'):
        # search extractors
        s += make_valid_template.format(valid_url=ie._make_valid_url())
    return s


# find the correct sorting and add the required base classes so that subclasses
# can be correctly created
classes = _ALL_CLASSES[:-1]
ordered_cls = []
while classes:
    for c in classes[:]:
        bases = set(c.__bases__) - set((object, InfoExtractor, SearchInfoExtractor))
        stop = False
        for b in bases:
            if b not in classes and b not in ordered_cls:
                if b.__name__ == 'GenericIE':
                    # GenericIE must stay last (_ALL_CLASSES[-1]); seeing it as
                    # a base here means the ordering is broken.  Fail loudly
                    # with a non-zero status instead of a bare exit(), which
                    # silently terminated with status 0 and no output file.
                    sys.exit('Error: GenericIE appears as a base class of %s' % c.__name__)
                classes.insert(0, b)
                stop = True
        if stop:
            break
        if all(b in ordered_cls for b in bases):
            ordered_cls.append(c)
            classes.remove(c)
            break
ordered_cls.append(_ALL_CLASSES[-1])

names = []
for ie in ordered_cls:
    name = ie.__name__
    src = build_lazy_ie(ie, name)
    module_contents.append(src)
    if ie in _ALL_CLASSES:
        names.append(name)

module_contents.append(
    '_ALL_CLASSES = [{0}]'.format(', '.join(names)))

module_src = '\n'.join(module_contents) + '\n'

with open(lazy_extractors_filename, 'wt') as f:
    f.write(module_src)
unlicense
duke605/RunePy
commands/viswax.py
1
2553
from discord.ext import commands
from datetime import datetime
from bs4 import BeautifulSoup
import discord
import re


class VisWax:
    """Cog that reports the daily Rune Goldberg Machine rune combination,
    scraped from the official RuneScape forum thread."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, aliases=['wax'], description='Shows the combination of runes needed for the Rune Goldberg Machine.')
    async def viswax(self, ctx):
        """Reply with an embed listing today's two rune slots."""
        await self.bot.send_typing(ctx.message.channel)
        combo = await self.get_rune_combo()

        # Checking if updated
        if not combo:
            await self.bot.say("Today's rune combinations have not been updated yet. Please try again later.")
            return

        e = discord.Embed()
        e.colour = 0x3572a7
        e.add_field(name='First Rune', value='\n'.join(combo[0]), inline=False)
        e.add_field(name='Second Rune', value='\n'.join(combo[1]), inline=False)

        await self.bot.say(embed=e)

    async def get_rune_combo(self):
        """
        Gets the rune combo from cache or direct from the site.

        Returns a (slot1, slot2) pair of cleaned rune-name lists, or None when
        the forum post has not been updated for the current UTC day yet.
        """
        # Getting html
        async with self.bot.whttp.get('http://services.runescape.com/m=forum/forums.ws?75,76,387,65763383') as r:
            text = await r.text()

        soup = BeautifulSoup(text, 'html.parser')
        # The second forum-post body holds the daily combination post.
        post = soup.find_all('span', attrs={'class': 'forum-post__body'})[1].text.lower()
        day = re.search(r'combination\s+for\s+.+\s+(\d+).+;', post).group(1)
        slot1, slot2 = re.search(r'slot 1:- (.+?) slot 2:- (.+?) slot', post).groups()

        # Checking if runes updated yet
        if int(day) != datetime.utcnow().day:
            return None

        # Cleaning up the data
        slot1 = [VisWax.clean_format(r.strip().capitalize()) for r in slot1.split('-')]
        slot2 = [VisWax.clean_format(r.strip().capitalize()) for r in slot2.split('-')]

        return slot1, slot2

    @staticmethod
    def clean_format(s: str):
        """Normalise one rune entry, capitalising any IM/HCIM alternatives
        listed in parentheses, e.g. 'Air (mind, body)' -> 'Air (Mind, Body)'."""
        # No IM/HCIM runes to clean
        if '(' not in s:
            return s

        try:
            # Trying to parse properly formated data
            rune, alts = re.search(r'(.+)\s?\((.+)\)', s).groups()
        except AttributeError:
            # The post author sometimes forgets the closing bracket; re.search
            # then returns None and .groups() raises AttributeError, so fall
            # back to a pattern without the trailing ')'.  (Was a bare except,
            # which also masked unrelated bugs.)
            rune, alts = re.search(r'(.+)\s?\((.+)', s).groups()

        alts = [a.strip().capitalize() for a in alts.split(',')]
        return '%s (%s)' % (rune, ', '.join(alts))


def setup(bot):
    bot.add_cog(VisWax(bot))
mit
adafruit/micropython
py/makeqstrdefs.py
1
5324
""" This script processes the output from the C preprocessor and extracts all qstr. Each qstr is transformed into a qstr definition of the form 'Q(...)'. This script works with Python 2.6, 2.7, 3.3 and 3.4. """ from __future__ import print_function import re import sys import os # Python 2/3 compatibility: # - iterating through bytes is different # - codepoint2name lives in a different module import platform if platform.python_version_tuple()[0] == '2': bytes_cons = lambda val, enc=None: bytearray(val) from htmlentitydefs import name2codepoint elif platform.python_version_tuple()[0] == '3': bytes_cons = bytes from html.entities import name2codepoint unichr = chr # end compatibility code # Blacklist of qstrings that are specially handled in further # processing and should be ignored QSTRING_BLACK_LIST = set(['NULL', 'number_of']) # add some custom names to map characters that aren't in HTML name2codepoint['hyphen'] = ord('-') name2codepoint['space'] = ord(' ') name2codepoint['squot'] = ord('\'') name2codepoint['comma'] = ord(',') name2codepoint['dot'] = ord('.') name2codepoint['colon'] = ord(':') name2codepoint['semicolon'] = ord(';') name2codepoint['slash'] = ord('/') name2codepoint['percent'] = ord('%') name2codepoint['hash'] = ord('#') name2codepoint['paren_open'] = ord('(') name2codepoint['paren_close'] = ord(')') name2codepoint['bracket_open'] = ord('[') name2codepoint['bracket_close'] = ord(']') name2codepoint['brace_open'] = ord('{') name2codepoint['brace_close'] = ord('}') name2codepoint['star'] = ord('*') name2codepoint['bang'] = ord('!') name2codepoint['backslash'] = ord('\\') name2codepoint['plus'] = ord('+') name2codepoint['dollar'] = ord('$') name2codepoint['equals'] = ord('=') name2codepoint['question'] = ord('?') name2codepoint['at_sign'] = ord('@') name2codepoint['caret'] = ord('^') name2codepoint['pipe'] = ord('|') name2codepoint['tilde'] = ord('~') def write_out(fname, output): if output: for m, r in [("/", "__"), ("\\", "__"), (":", "@"), ("..", 
"@@")]: fname = fname.replace(m, r) with open(args.output_dir + "/" + fname + ".qstr", "w") as f: f.write("\n".join(output) + "\n") def qstr_unescape(qstr): for name in name2codepoint: if "__" + name + "__" in qstr: continue if "_" + name + "_" in qstr: qstr = qstr.replace("_" + name + "_", str(unichr(name2codepoint[name]))) return qstr def process_file(f): re_line = re.compile(r"#[line]*\s(\d+)\s\"([^\"]+)\"") re_qstr = re.compile(r'MP_QSTR_[_a-zA-Z0-9]+') re_translate = re.compile(r'translate\(\"((?:(?=(\\?))\2.)*?)\"\)') output = [] last_fname = None lineno = 0 for line in f: if line.isspace(): continue # match gcc-like output (# n "file") and msvc-like output (#line n "file") if line.startswith(('# ', '#line')): m = re_line.match(line) assert m is not None #print(m.groups()) lineno = int(m.group(1)) fname = m.group(2) if not fname.endswith(".c"): continue if fname != last_fname: write_out(last_fname, output) output = [] last_fname = fname continue for match in re_qstr.findall(line): name = match.replace('MP_QSTR_', '') if name not in QSTRING_BLACK_LIST: output.append('Q(' + qstr_unescape(name) + ')') for match in re_translate.findall(line): output.append('TRANSLATE("' + match[0] + '")') lineno += 1 write_out(last_fname, output) return "" def cat_together(): import glob import hashlib hasher = hashlib.md5() all_lines = [] outf = open(args.output_dir + "/out", "wb") for fname in glob.glob(args.output_dir + "/*.qstr"): with open(fname, "rb") as f: lines = f.readlines() all_lines += lines all_lines.sort() all_lines = b"\n".join(all_lines) outf.write(all_lines) outf.close() hasher.update(all_lines) new_hash = hasher.hexdigest() #print(new_hash) old_hash = None try: with open(args.output_file + ".hash") as f: old_hash = f.read() except IOError: pass if old_hash != new_hash: print("QSTR updated") try: # rename below might fail if file exists os.remove(args.output_file) except: pass os.rename(args.output_dir + "/out", args.output_file) with open(args.output_file + 
".hash", "w") as f: f.write(new_hash) else: print("QSTR not updated") if __name__ == "__main__": if len(sys.argv) != 5: print('usage: %s command input_filename output_dir output_file' % sys.argv[0]) sys.exit(2) class Args: pass args = Args() args.command = sys.argv[1] args.input_filename = sys.argv[2] args.output_dir = sys.argv[3] args.output_file = sys.argv[4] try: os.makedirs(args.output_dir) except OSError: pass if args.command == "split": with open(args.input_filename) as infile: process_file(infile) if args.command == "cat": cat_together()
mit
nkoep/pymanopt
examples/dominant_eigenvector.py
1
1598
import numpy as np
import theano.tensor as T
from numpy import linalg as la, random as rnd

import pymanopt
from pymanopt.manifolds import Sphere
from pymanopt.solvers import ConjugateGradient


def dominant_eigenvector(A):
    """
    Returns the dominant eigenvector of the symmetric matrix A.

    Note: For the same A, this should yield the same as the dominant invariant
    subspace example with p = 1.
    """
    num_rows, num_cols = A.shape
    assert num_rows == num_cols, "matrix must be square"
    assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"

    # Maximise x^T A x over the unit sphere; the maximiser is the dominant
    # eigenvector of A (Rayleigh quotient).
    x = T.vector()

    @pymanopt.function.Theano(x)
    def cost(x):
        return -x.T.dot(T.dot(A, x))

    problem = pymanopt.Problem(Sphere(num_cols), cost)
    solver = ConjugateGradient(maxiter=500, minstepsize=1e-6)
    solution = solver.solve(problem)
    return solution.squeeze()


if __name__ == "__main__":
    # Generate random problem data.
    n = 128
    A = rnd.randn(n, n)
    A = 0.5 * (A + A.T)

    # Calculate the actual solution by a conventional eigenvalue decomposition.
    eigenvalues, eigenvectors = la.eig(A)
    x = eigenvectors[:, np.argmax(eigenvalues)]

    # Solve the problem with pymanopt.
    xopt = dominant_eigenvector(A)

    # Make sure both vectors have the same direction. Both are valid
    # eigenvectors, but for comparison we need to get rid of the sign
    # ambiguity.
    xopt = xopt if np.sign(x[0]) == np.sign(xopt[0]) else -xopt

    # Print information about the solution.
    print('')
    print("l2-norm of x: %f" % la.norm(x))
    print("l2-norm of xopt: %f" % la.norm(xopt))
    print("l2-error: %f" % la.norm(x - xopt))
bsd-3-clause
mluke93/osf.io
scripts/osfstorage/correct_moved_node_settings.py
18
1767
import sys import logging from scripts import utils as script_utils from framework.transactions.context import TokuTransaction from website.app import init_app from website.addons.osfstorage import model logger = logging.getLogger(__name__) def do_migration(): count = 0 errored = 0 for node_settings in model.OsfStorageNodeSettings.find(): root = model.OsfStorageFileNode.load(node_settings.to_storage()['root_node']) for child in iter_children(root): if child.node_settings != node_settings: logger.info('Update node_settings for {!r} in project {!r}'.format(child, node_settings.owner)) child.node_settings = node_settings try: child.save() except Exception as err: errored += 1 logger.error('Error occurred while updating {!r}'.format(child)) logger.exception(err) logger.error('Skipping...') else: count += 1 logger.info('Updated: {} file nodes'.format(count)) logger.info('Errored: {} file nodes'.format(errored)) def iter_children(file_node): to_go = [file_node] while to_go: for child in to_go.pop(0).children: if child.is_collection: to_go.append(child) yield child def main(dry=True): init_app(set_backends=True, routes=False) # Sets the storage backends on all models with TokuTransaction(): do_migration() if dry: raise Exception('Abort Transaction - Dry Run') if __name__ == '__main__': dry = 'dry' in sys.argv if not dry: script_utils.add_file_logger(logger, __file__) main(dry=dry)
apache-2.0
mitnk/letsencrypt
acme/acme/challenges.py
6
16438
"""ACME Identifier Validation Challenges.""" import abc import functools import hashlib import logging import socket from cryptography.hazmat.primitives import hashes import OpenSSL import requests from acme import errors from acme import crypto_util from acme import fields from acme import jose logger = logging.getLogger(__name__) # pylint: disable=too-few-public-methods class Challenge(jose.TypedJSONObjectWithFields): # _fields_to_partial_json | pylint: disable=abstract-method """ACME challenge.""" TYPES = {} @classmethod def from_json(cls, jobj): try: return super(Challenge, cls).from_json(jobj) except jose.UnrecognizedTypeError as error: logger.debug(error) return UnrecognizedChallenge.from_json(jobj) class ChallengeResponse(jose.TypedJSONObjectWithFields): # _fields_to_partial_json | pylint: disable=abstract-method """ACME challenge response.""" TYPES = {} resource_type = 'challenge' resource = fields.Resource(resource_type) class UnrecognizedChallenge(Challenge): """Unrecognized challenge. ACME specification defines a generic framework for challenges and defines some standard challenges that are implemented in this module. However, other implementations (including peers) might define additional challenge types, which should be ignored if unrecognized. :ivar jobj: Original JSON decoded object. """ def __init__(self, jobj): super(UnrecognizedChallenge, self).__init__() object.__setattr__(self, "jobj", jobj) def to_partial_json(self): # pylint: disable=no-member return self.jobj @classmethod def from_json(cls, jobj): return cls(jobj) class _TokenChallenge(Challenge): """Challenge with token. 
:ivar bytes token: """ TOKEN_SIZE = 128 / 8 # Based on the entropy value from the spec """Minimum size of the :attr:`token` in bytes.""" # TODO: acme-spec doesn't specify token as base64-encoded value token = jose.Field( "token", encoder=jose.encode_b64jose, decoder=functools.partial( jose.decode_b64jose, size=TOKEN_SIZE, minimum=True)) # XXX: rename to ~token_good_for_url @property def good_token(self): # XXX: @token.decoder """Is `token` good? .. todo:: acme-spec wants "It MUST NOT contain any non-ASCII characters", but it should also warrant that it doesn't contain ".." or "/"... """ # TODO: check that path combined with uri does not go above # URI_ROOT_PATH! return b'..' not in self.token and b'/' not in self.token class KeyAuthorizationChallengeResponse(ChallengeResponse): """Response to Challenges based on Key Authorization. :param unicode key_authorization: """ key_authorization = jose.Field("keyAuthorization") thumbprint_hash_function = hashes.SHA256 def verify(self, chall, account_public_key): """Verify the key authorization. :param KeyAuthorization chall: Challenge that corresponds to this response. :param JWK account_public_key: :return: ``True`` iff verification of the key authorization was successful. 
:rtype: bool """ parts = self.key_authorization.split('.') # pylint: disable=no-member if len(parts) != 2: logger.debug("Key authorization (%r) is not well formed", self.key_authorization) return False if parts[0] != chall.encode("token"): logger.debug("Mismatching token in key authorization: " "%r instead of %r", parts[0], chall.encode("token")) return False thumbprint = jose.b64encode(account_public_key.thumbprint( hash_function=self.thumbprint_hash_function)).decode() if parts[1] != thumbprint: logger.debug("Mismatching thumbprint in key authorization: " "%r instead of %r", parts[0], thumbprint) return False return True class KeyAuthorizationChallenge(_TokenChallenge): # pylint: disable=abstract-class-little-used,too-many-ancestors """Challenge based on Key Authorization. :param response_cls: Subclass of `KeyAuthorizationChallengeResponse` that will be used to generate `response`. """ __metaclass__ = abc.ABCMeta response_cls = NotImplemented thumbprint_hash_function = ( KeyAuthorizationChallengeResponse.thumbprint_hash_function) def key_authorization(self, account_key): """Generate Key Authorization. :param JWK account_key: :rtype unicode: """ return self.encode("token") + "." + jose.b64encode( account_key.thumbprint( hash_function=self.thumbprint_hash_function)).decode() def response(self, account_key): """Generate response to the challenge. :param JWK account_key: :returns: Response (initialized `response_cls`) to the challenge. :rtype: KeyAuthorizationChallengeResponse """ return self.response_cls( key_authorization=self.key_authorization(account_key)) @abc.abstractmethod def validation(self, account_key, **kwargs): """Generate validation for the challenge. Subclasses must implement this method, but they are likely to return completely different data structures, depending on what's necessary to complete the challenge. Interepretation of that return value must be known to the caller. :param JWK account_key: :returns: Challenge-specific validation. 
""" raise NotImplementedError() # pragma: no cover def response_and_validation(self, account_key, *args, **kwargs): """Generate response and validation. Convenience function that return results of `response` and `validation`. :param JWK account_key: :rtype: tuple """ return (self.response(account_key), self.validation(account_key, *args, **kwargs)) @ChallengeResponse.register class HTTP01Response(KeyAuthorizationChallengeResponse): """ACME http-01 challenge response.""" typ = "http-01" PORT = 80 """Verification port as defined by the protocol. You can override it (e.g. for testing) by passing ``port`` to `simple_verify`. """ WHITESPACE_CUTSET = "\n\r\t " """Whitespace characters which should be ignored at the end of the body.""" def simple_verify(self, chall, domain, account_public_key, port=None): """Simple verify. :param challenges.SimpleHTTP chall: Corresponding challenge. :param unicode domain: Domain name being verified. :param JWK account_public_key: Public key for the key pair being authorized. :param int port: Port used in the validation. :returns: ``True`` iff validation is successful, ``False`` otherwise. :rtype: bool """ if not self.verify(chall, account_public_key): logger.debug("Verification of key authorization in response failed") return False # TODO: ACME specification defines URI template that doesn't # allow to use a custom port... Make sure port is not in the # request URI, if it's standard. if port is not None and port != self.PORT: logger.warning( "Using non-standard port for http-01 verification: %s", port) domain += ":{0}".format(port) uri = chall.uri(domain) logger.debug("Verifying %s at %s...", chall.typ, uri) try: http_response = requests.get(uri) except requests.exceptions.RequestException as error: logger.error("Unable to reach %s: %s", uri, error) return False logger.debug("Received %s: %s. 
Headers: %s", http_response, http_response.text, http_response.headers) challenge_response = http_response.text.rstrip(self.WHITESPACE_CUTSET) if self.key_authorization != challenge_response: logger.debug("Key authorization from response (%r) doesn't match " "HTTP response (%r)", self.key_authorization, challenge_response) return False return True @Challenge.register # pylint: disable=too-many-ancestors class HTTP01(KeyAuthorizationChallenge): """ACME http-01 challenge.""" response_cls = HTTP01Response typ = response_cls.typ URI_ROOT_PATH = ".well-known/acme-challenge" """URI root path for the server provisioned resource.""" @property def path(self): """Path (starting with '/') for provisioned resource. :rtype: string """ return '/' + self.URI_ROOT_PATH + '/' + self.encode('token') def uri(self, domain): """Create an URI to the provisioned resource. Forms an URI to the HTTPS server provisioned resource (containing :attr:`~SimpleHTTP.token`). :param unicode domain: Domain name being verified. :rtype: string """ return "http://" + domain + self.path def validation(self, account_key, **unused_kwargs): """Generate validation. :param JWK account_key: :rtype: unicode """ return self.key_authorization(account_key) @ChallengeResponse.register class TLSSNI01Response(KeyAuthorizationChallengeResponse): """ACME tls-sni-01 challenge response.""" typ = "tls-sni-01" DOMAIN_SUFFIX = b".acme.invalid" """Domain name suffix.""" PORT = 443 """Verification port as defined by the protocol. You can override it (e.g. for testing) by passing ``port`` to `simple_verify`. """ @property def z(self): # pylint: disable=invalid-name """``z`` value used for verification. :rtype bytes: """ return hashlib.sha256( self.key_authorization.encode("utf-8")).hexdigest().lower().encode() @property def z_domain(self): """Domain name used for verification, generated from `z`. :rtype bytes: """ return self.z[:32] + b'.' 
+ self.z[32:] + self.DOMAIN_SUFFIX def gen_cert(self, key=None, bits=2048): """Generate tls-sni-01 certificate. :param OpenSSL.crypto.PKey key: Optional private key used in certificate generation. If not provided (``None``), then fresh key will be generated. :param int bits: Number of bits for newly generated key. :rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey` """ if key is None: key = OpenSSL.crypto.PKey() key.generate_key(OpenSSL.crypto.TYPE_RSA, bits) return crypto_util.gen_ss_cert(key, [ # z_domain is too big to fit into CN, hence first dummy domain 'dummy', self.z_domain.decode()], force_san=True), key def probe_cert(self, domain, **kwargs): """Probe tls-sni-01 challenge certificate. :param unicode domain: """ # TODO: domain is not necessary if host is provided if "host" not in kwargs: host = socket.gethostbyname(domain) logging.debug('%s resolved to %s', domain, host) kwargs["host"] = host kwargs.setdefault("port", self.PORT) kwargs["name"] = self.z_domain # TODO: try different methods? # pylint: disable=protected-access return crypto_util.probe_sni(**kwargs) def verify_cert(self, cert): """Verify tls-sni-01 challenge certificate. :param OpensSSL.crypto.X509 cert: Challenge certificate. :returns: Whether the certificate was successfully verified. :rtype: bool """ # pylint: disable=protected-access sans = crypto_util._pyopenssl_cert_or_req_san(cert) logging.debug('Certificate %s. SANs: %s', cert.digest('sha1'), sans) return self.z_domain.decode() in sans def simple_verify(self, chall, domain, account_public_key, cert=None, **kwargs): """Simple verify. Verify ``validation`` using ``account_public_key``, optionally probe tls-sni-01 certificate and check using `verify_cert`. :param .challenges.TLSSNI01 chall: Corresponding challenge. :param str domain: Domain name being validated. :param JWK account_public_key: :param OpenSSL.crypto.X509 cert: Optional certificate. If not provided (``None``) certificate will be retrieved using `probe_cert`. 
:param int port: Port used to probe the certificate. :returns: ``True`` iff client's control of the domain has been verified, ``False`` otherwise. :rtype: bool """ if not self.verify(chall, account_public_key): logger.debug("Verification of key authorization in response failed") return False if cert is None: try: cert = self.probe_cert(domain=domain, **kwargs) except errors.Error as error: logger.debug(error, exc_info=True) return False return self.verify_cert(cert) @Challenge.register # pylint: disable=too-many-ancestors class TLSSNI01(KeyAuthorizationChallenge): """ACME tls-sni-01 challenge.""" response_cls = TLSSNI01Response typ = response_cls.typ # boulder#962, ietf-wg-acme#22 #n = jose.Field("n", encoder=int, decoder=int) def validation(self, account_key, **kwargs): """Generate validation. :param JWK account_key: :param OpenSSL.crypto.PKey cert_key: Optional private key used in certificate generation. If not provided (``None``), then fresh key will be generated. :rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey` """ return self.response(account_key).gen_cert(key=kwargs.get('cert_key')) @Challenge.register # pylint: disable=too-many-ancestors class DNS(_TokenChallenge): """ACME "dns" challenge.""" typ = "dns" LABEL = "_acme-challenge" """Label clients prepend to the domain name being validated.""" def gen_validation(self, account_key, alg=jose.RS256, **kwargs): """Generate validation. :param .JWK account_key: Private account key. :param .JWA alg: :returns: This challenge wrapped in `.JWS` :rtype: .JWS """ return jose.JWS.sign( payload=self.json_dumps(sort_keys=True).encode('utf-8'), key=account_key, alg=alg, **kwargs) def check_validation(self, validation, account_public_key): """Check validation. 
:param JWS validation: :param JWK account_public_key: :rtype: bool """ if not validation.verify(key=account_public_key): return False try: return self == self.json_loads( validation.payload.decode('utf-8')) except jose.DeserializationError as error: logger.debug("Checking validation for DNS failed: %s", error) return False def gen_response(self, account_key, **kwargs): """Generate response. :param .JWK account_key: Private account key. :param .JWA alg: :rtype: DNSResponse """ return DNSResponse(validation=self.gen_validation( self, account_key, **kwargs)) def validation_domain_name(self, name): """Domain name for TXT validation record. :param unicode name: Domain name being validated. """ return "{0}.{1}".format(self.LABEL, name) @ChallengeResponse.register class DNSResponse(ChallengeResponse): """ACME "dns" challenge response. :param JWS validation: """ typ = "dns" validation = jose.Field("validation", decoder=jose.JWS.from_json) def check_validation(self, chall, account_public_key): """Check validation. :param challenges.DNS chall: :param JWK account_public_key: :rtype: bool """ return chall.check_validation(self.validation, account_public_key)
apache-2.0
maelnor/nova
nova/tests/api/openstack/compute/contrib/test_migrate_server.py
9
10692
# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack.compute.plugins.v3 import migrate_server from nova import exception from nova.openstack.common import uuidutils from nova.tests.api.openstack.compute.plugins.v3 import \ admin_only_action_common from nova.tests.api.openstack import fakes class MigrateServerTests(admin_only_action_common.CommonTests): def setUp(self): super(MigrateServerTests, self).setUp() self.controller = migrate_server.MigrateServerController() self.compute_api = self.controller.compute_api def _fake_controller(*args, **kwargs): return self.controller self.stubs.Set(migrate_server, 'MigrateServerController', _fake_controller) self.app = fakes.wsgi_app_v3(init_only=('servers', 'os-migrate-server'), fake_auth_context=self.context) self.mox.StubOutWithMock(self.compute_api, 'get') def test_migrate(self): method_translations = {'migrate': 'resize', 'os-migrateLive': 'live_migrate'} body_map = {'os-migrateLive': {'host': 'hostname', 'block_migration': False, 'disk_over_commit': False}} args_map = {'os-migrateLive': ((False, False, 'hostname'), {})} self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map, method_translations=method_translations, args_map=args_map) def test_migrate_none_hostname(self): method_translations = {'migrate': 'resize', 'os-migrateLive': 'live_migrate'} body_map = {'os-migrateLive': {'host': None, 'block_migration': False, 'disk_over_commit': 
False}} args_map = {'os-migrateLive': ((False, False, None), {})} self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map, method_translations=method_translations, args_map=args_map) def test_migrate_with_non_existed_instance(self): body_map = {'os-migrateLive': {'host': 'hostname', 'block_migration': False, 'disk_over_commit': False}} self._test_actions_with_non_existed_instance( ['migrate', 'os-migrateLive'], body_map=body_map) def test_migrate_raise_conflict_on_invalid_state(self): method_translations = {'migrate': 'resize', 'os-migrateLive': 'live_migrate'} body_map = {'os-migrateLive': {'host': 'hostname', 'block_migration': False, 'disk_over_commit': False}} args_map = {'os-migrateLive': ((False, False, 'hostname'), {})} self._test_actions_raise_conflict_on_invalid_state( ['migrate', 'os-migrateLive'], body_map=body_map, args_map=args_map, method_translations=method_translations) def test_actions_with_locked_instance(self): method_translations = {'migrate': 'resize', 'os-migrateLive': 'live_migrate'} body_map = {'os-migrateLive': {'host': 'hostname', 'block_migration': False, 'disk_over_commit': False}} args_map = {'os-migrateLive': ((False, False, 'hostname'), {})} self._test_actions_with_locked_instance( ['migrate', 'os-migrateLive'], body_map=body_map, args_map=args_map, method_translations=method_translations) def _test_migrate_exception(self, exc_info, expected_result): self.mox.StubOutWithMock(self.compute_api, 'resize') instance = self._stub_instance_get() self.compute_api.resize(self.context, instance).AndRaise(exc_info) self.mox.ReplayAll() res = self._make_request('/servers/%s/action' % instance['uuid'], {'migrate': None}) self.assertEqual(expected_result, res.status_int) def test_migrate_too_many_instances(self): exc_info = exception.TooManyInstances(overs='', req='', used=0, allowed=0, resource='') self._test_migrate_exception(exc_info, 403) def _test_migrate_live_succeeded(self, param): self.mox.StubOutWithMock(self.compute_api, 
'live_migrate') instance = self._stub_instance_get() self.compute_api.live_migrate(self.context, instance, False, False, 'hostname') self.mox.ReplayAll() res = self._make_request('/servers/%s/action' % instance.uuid, {'os-migrateLive': param}) self.assertEqual(202, res.status_int) def test_migrate_live_enabled(self): param = {'host': 'hostname', 'block_migration': False, 'disk_over_commit': False} self._test_migrate_live_succeeded(param) def test_migrate_live_enabled_with_string_param(self): param = {'host': 'hostname', 'block_migration': "False", 'disk_over_commit': "False"} self._test_migrate_live_succeeded(param) def test_migrate_live_without_host(self): res = self._make_request('/servers/FAKE/action', {'os-migrateLive': {'block_migration': False, 'disk_over_commit': False}}) self.assertEqual(400, res.status_int) def test_migrate_live_without_block_migration(self): res = self._make_request('/servers/FAKE/action', {'os-migrateLive': {'host': 'hostname', 'disk_over_commit': False}}) self.assertEqual(400, res.status_int) def test_migrate_live_without_disk_over_commit(self): res = self._make_request('/servers/FAKE/action', {'os-migrateLive': {'host': 'hostname', 'block_migration': False}}) self.assertEqual(400, res.status_int) def test_migrate_live_with_invalid_block_migration(self): res = self._make_request('/servers/FAKE/action', {'os-migrateLive': {'host': 'hostname', 'block_migration': "foo", 'disk_over_commit': False}}) self.assertEqual(400, res.status_int) def test_migrate_live_with_invalid_disk_over_commit(self): res = self._make_request('/servers/FAKE/action', {'os-migrateLive': {'host': 'hostname', 'block_migration': False, 'disk_over_commit': "foo"}}) self.assertEqual(400, res.status_int) def _test_migrate_live_failed_with_exception(self, fake_exc, uuid=None): self.mox.StubOutWithMock(self.compute_api, 'live_migrate') instance = self._stub_instance_get(uuid=uuid) self.compute_api.live_migrate(self.context, instance, False, False, 
'hostname').AndRaise(fake_exc) self.mox.ReplayAll() res = self._make_request('/servers/%s/action' % instance.uuid, {'os-migrateLive': {'host': 'hostname', 'block_migration': False, 'disk_over_commit': False}}) self.assertEqual(400, res.status_int) self.assertIn(unicode(fake_exc), res.body) def test_migrate_live_compute_service_unavailable(self): self._test_migrate_live_failed_with_exception( exception.ComputeServiceUnavailable(host='host')) def test_migrate_live_invalid_hypervisor_type(self): self._test_migrate_live_failed_with_exception( exception.InvalidHypervisorType()) def test_migrate_live_invalid_cpu_info(self): self._test_migrate_live_failed_with_exception( exception.InvalidCPUInfo(reason="")) def test_migrate_live_unable_to_migrate_to_self(self): uuid = uuidutils.generate_uuid() self._test_migrate_live_failed_with_exception( exception.UnableToMigrateToSelf(instance_id=uuid, host='host'), uuid=uuid) def test_migrate_live_destination_hypervisor_too_old(self): self._test_migrate_live_failed_with_exception( exception.DestinationHypervisorTooOld()) def test_migrate_live_no_valid_host(self): self._test_migrate_live_failed_with_exception( exception.NoValidHost(reason='')) def test_migrate_live_invalid_local_storage(self): self._test_migrate_live_failed_with_exception( exception.InvalidLocalStorage(path='', reason='')) def test_migrate_live_invalid_shared_storage(self): self._test_migrate_live_failed_with_exception( exception.InvalidSharedStorage(path='', reason='')) def test_migrate_live_hypervisor_unavailable(self): self._test_migrate_live_failed_with_exception( exception.HypervisorUnavailable(host="")) def test_migrate_live_instance_not_running(self): self._test_migrate_live_failed_with_exception( exception.InstanceNotRunning(instance_id="")) def test_migrate_live_pre_check_error(self): self._test_migrate_live_failed_with_exception( exception.MigrationPreCheckError(reason=''))
apache-2.0
mith1979/ansible_automation
applied_python/applied_python/lib/python2.7/site-packages/django/db/migrations/executor.py
71
10728
from __future__ import unicode_literals from django.apps.registry import apps as global_apps from django.db import migrations from .loader import MigrationLoader from .recorder import MigrationRecorder from .state import ProjectState class MigrationExecutor(object): """ End-to-end migration execution - loads migrations, and runs them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets, clean_start=False): """ Given a set of targets, returns a list of (Migration instance, backwards?). """ plan = [] if clean_start: applied = set() else: applied = set(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: # Don't migrate backwards all the way to the target node (that # may roll back dependencies in other apps that don't need to # be rolled back); instead roll back through target's immediate # child(ren) in the same app, and no further. 
next_in_app = sorted( n for n in self.loader.graph.node_map[target].children if n[0] == target[0] ) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied.add(migration) return plan def migrate(self, targets, plan=None, fake=False, fake_initial=False): """ Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations. """ if plan is None: plan = self.migration_plan(targets) migrations_to_run = {m[0] for m in plan} # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) # Holds all states right before a migration is applied # if the migration is being run. states = {} state = ProjectState(real_apps=list(self.loader.unmigrated_apps)) if self.progress_callback: self.progress_callback("render_start") # Phase 1 -- Store all project states of migrations right before they # are applied. The first migration that will be applied in phase 2 will # trigger the rendering of the initial project state. From this time on # models will be recursively reloaded as explained in # `django.db.migrations.state.get_related_models_recursive()`. for migration, _ in full_plan: if not migrations_to_run: # We remove every migration whose state was already computed # from the set below (`migrations_to_run.remove(migration)`). # If no states for migrations must be computed, we can exit # this loop. Migrations that occur after the latest migration # that is about to be applied would only trigger unneeded # mutate_state() calls. 
break do_run = migration in migrations_to_run if do_run: if 'apps' not in state.__dict__: state.apps # Render all real_apps -- performance critical states[migration] = state.clone() migrations_to_run.remove(migration) # Only preserve the state if the migration is being run later state = migration.mutate_state(state, preserve=do_run) if self.progress_callback: self.progress_callback("render_success") # Phase 2 -- Run the migrations for migration, backwards in plan: if not backwards: self.apply_migration(states[migration], migration, fake=fake, fake_initial=fake_initial) else: self.unapply_migration(states[migration], migration, fake=fake) self.check_replacements() def collect_sql(self, plan): """ Takes a migration plan and returns a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True) as schema_editor: if state is None: state = self.loader.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements def apply_migration(self, state, migration, fake=False, fake_initial=False): """ Runs a migration forwards. 
""" if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: if fake_initial: # Test to see if this is an already-applied initial migration applied, state = self.detect_soft_applied(state, migration) if applied: fake = True if not fake: # Alright, do it normally with self.connection.schema_editor() as schema_editor: state = migration.apply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) return state def unapply_migration(self, state, migration, fake=False): """ Runs a migration backwards. """ if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor() as schema_editor: state = migration.unapply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) else: self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) return state def check_replacements(self): """ Mark replacement migrations applied if their replaced set all are. We do this unconditionally on every migrate, rather than just when migrations are applied or unapplied, so as to correctly handle the case when a new squash migration is pushed to a deployment that already had all its replaced migrations applied. In this case no new migration will be applied, but we still want to correctly maintain the applied state of the squash migration. 
""" applied = self.recorder.applied_migrations() for key, migration in self.loader.replacements.items(): all_applied = all(m in applied for m in migration.replaces) if all_applied and key not in applied: self.recorder.record_applied(*key) def detect_soft_applied(self, project_state, migration): """ Tests whether a migration has been implicitly applied - that the tables it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel). """ # Bail if the migration isn't the first one in its app if [name for app, name in migration.dependencies if app == migration.app_label]: return False, project_state if project_state is None: after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) else: after_state = migration.mutate_state(project_state) apps = after_state.apps found_create_migration = False # Make sure all create model are done for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()): return False, project_state found_create_migration = True # If we get this far and we found at least one CreateModel migration, # the migration is considered implicitly applied. return found_create_migration, after_state
apache-2.0
flyher/pymo
android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_index.py
84
9958
import unittest from test import test_support import operator from sys import maxint maxsize = test_support.MAX_Py_ssize_t minsize = -maxsize-1 class oldstyle: def __index__(self): return self.ind class newstyle(object): def __index__(self): return self.ind class TrapInt(int): def __index__(self): return self class TrapLong(long): def __index__(self): return self class BaseTestCase(unittest.TestCase): def setUp(self): self.o = oldstyle() self.n = newstyle() def test_basic(self): self.o.ind = -2 self.n.ind = 2 self.assertEqual(operator.index(self.o), -2) self.assertEqual(operator.index(self.n), 2) def test_slice(self): self.o.ind = 1 self.n.ind = 2 slc = slice(self.o, self.o, self.o) check_slc = slice(1, 1, 1) self.assertEqual(slc.indices(self.o), check_slc.indices(1)) slc = slice(self.n, self.n, self.n) check_slc = slice(2, 2, 2) self.assertEqual(slc.indices(self.n), check_slc.indices(2)) def test_wrappers(self): self.o.ind = 4 self.n.ind = 5 self.assertEqual(6 .__index__(), 6) self.assertEqual(-7L.__index__(), -7) self.assertEqual(self.o.__index__(), 4) self.assertEqual(self.n.__index__(), 5) self.assertEqual(True.__index__(), 1) self.assertEqual(False.__index__(), 0) def test_subclasses(self): r = range(10) self.assertEqual(r[TrapInt(5):TrapInt(10)], r[5:10]) self.assertEqual(r[TrapLong(5):TrapLong(10)], r[5:10]) self.assertEqual(slice(TrapInt()).indices(0), (0,0,1)) self.assertEqual(slice(TrapLong(0)).indices(0), (0,0,1)) def test_error(self): self.o.ind = 'dumb' self.n.ind = 'bad' self.assertRaises(TypeError, operator.index, self.o) self.assertRaises(TypeError, operator.index, self.n) self.assertRaises(TypeError, slice(self.o).indices, 0) self.assertRaises(TypeError, slice(self.n).indices, 0) class SeqTestCase(unittest.TestCase): # This test case isn't run directly. 
It just defines common tests # to the different sequence types below def setUp(self): self.o = oldstyle() self.n = newstyle() self.o2 = oldstyle() self.n2 = newstyle() def test_index(self): self.o.ind = -2 self.n.ind = 2 self.assertEqual(self.seq[self.n], self.seq[2]) self.assertEqual(self.seq[self.o], self.seq[-2]) def test_slice(self): self.o.ind = 1 self.o2.ind = 3 self.n.ind = 2 self.n2.ind = 4 self.assertEqual(self.seq[self.o:self.o2], self.seq[1:3]) self.assertEqual(self.seq[self.n:self.n2], self.seq[2:4]) def test_slice_bug7532(self): seqlen = len(self.seq) self.o.ind = int(seqlen * 1.5) self.n.ind = seqlen + 2 self.assertEqual(self.seq[self.o:], self.seq[0:0]) self.assertEqual(self.seq[:self.o], self.seq) self.assertEqual(self.seq[self.n:], self.seq[0:0]) self.assertEqual(self.seq[:self.n], self.seq) if isinstance(self.seq, ClassicSeq): return # These tests fail for ClassicSeq (see bug #7532) self.o2.ind = -seqlen - 2 self.n2.ind = -int(seqlen * 1.5) self.assertEqual(self.seq[self.o2:], self.seq) self.assertEqual(self.seq[:self.o2], self.seq[0:0]) self.assertEqual(self.seq[self.n2:], self.seq) self.assertEqual(self.seq[:self.n2], self.seq[0:0]) def test_repeat(self): self.o.ind = 3 self.n.ind = 2 self.assertEqual(self.seq * self.o, self.seq * 3) self.assertEqual(self.seq * self.n, self.seq * 2) self.assertEqual(self.o * self.seq, self.seq * 3) self.assertEqual(self.n * self.seq, self.seq * 2) def test_wrappers(self): self.o.ind = 4 self.n.ind = 5 self.assertEqual(self.seq.__getitem__(self.o), self.seq[4]) self.assertEqual(self.seq.__mul__(self.o), self.seq * 4) self.assertEqual(self.seq.__rmul__(self.o), self.seq * 4) self.assertEqual(self.seq.__getitem__(self.n), self.seq[5]) self.assertEqual(self.seq.__mul__(self.n), self.seq * 5) self.assertEqual(self.seq.__rmul__(self.n), self.seq * 5) def test_subclasses(self): self.assertEqual(self.seq[TrapInt()], self.seq[0]) self.assertEqual(self.seq[TrapLong()], self.seq[0]) def test_error(self): self.o.ind = 
'dumb' self.n.ind = 'bad' indexobj = lambda x, obj: obj.seq[x] self.assertRaises(TypeError, indexobj, self.o, self) self.assertRaises(TypeError, indexobj, self.n, self) sliceobj = lambda x, obj: obj.seq[x:] self.assertRaises(TypeError, sliceobj, self.o, self) self.assertRaises(TypeError, sliceobj, self.n, self) class ListTestCase(SeqTestCase): seq = [0,10,20,30,40,50] def test_setdelitem(self): self.o.ind = -2 self.n.ind = 2 lst = list('ab!cdefghi!j') del lst[self.o] del lst[self.n] lst[self.o] = 'X' lst[self.n] = 'Y' self.assertEqual(lst, list('abYdefghXj')) lst = [5, 6, 7, 8, 9, 10, 11] lst.__setitem__(self.n, "here") self.assertEqual(lst, [5, 6, "here", 8, 9, 10, 11]) lst.__delitem__(self.n) self.assertEqual(lst, [5, 6, 8, 9, 10, 11]) def test_inplace_repeat(self): self.o.ind = 2 self.n.ind = 3 lst = [6, 4] lst *= self.o self.assertEqual(lst, [6, 4, 6, 4]) lst *= self.n self.assertEqual(lst, [6, 4, 6, 4] * 3) lst = [5, 6, 7, 8, 9, 11] l2 = lst.__imul__(self.n) self.assertIs(l2, lst) self.assertEqual(lst, [5, 6, 7, 8, 9, 11] * 3) class _BaseSeq: def __init__(self, iterable): self._list = list(iterable) def __repr__(self): return repr(self._list) def __eq__(self, other): return self._list == other def __len__(self): return len(self._list) def __mul__(self, n): return self.__class__(self._list*n) __rmul__ = __mul__ def __getitem__(self, index): return self._list[index] class _GetSliceMixin: def __getslice__(self, i, j): return self._list.__getslice__(i, j) class ClassicSeq(_BaseSeq): pass class NewSeq(_BaseSeq, object): pass class ClassicSeqDeprecated(_GetSliceMixin, ClassicSeq): pass class NewSeqDeprecated(_GetSliceMixin, NewSeq): pass class TupleTestCase(SeqTestCase): seq = (0,10,20,30,40,50) class StringTestCase(SeqTestCase): seq = "this is a test" class ByteArrayTestCase(SeqTestCase): seq = bytearray("this is a test") class UnicodeTestCase(SeqTestCase): seq = u"this is a test" class ClassicSeqTestCase(SeqTestCase): seq = ClassicSeq((0,10,20,30,40,50)) class 
NewSeqTestCase(SeqTestCase): seq = NewSeq((0,10,20,30,40,50)) class ClassicSeqDeprecatedTestCase(SeqTestCase): seq = ClassicSeqDeprecated((0,10,20,30,40,50)) class NewSeqDeprecatedTestCase(SeqTestCase): seq = NewSeqDeprecated((0,10,20,30,40,50)) class XRangeTestCase(unittest.TestCase): def test_xrange(self): n = newstyle() n.ind = 5 self.assertEqual(xrange(1, 20)[n], 6) self.assertEqual(xrange(1, 20).__getitem__(n), 6) class OverflowTestCase(unittest.TestCase): def setUp(self): self.pos = 2**100 self.neg = -self.pos def test_large_longs(self): self.assertEqual(self.pos.__index__(), self.pos) self.assertEqual(self.neg.__index__(), self.neg) def _getitem_helper(self, base): class GetItem(base): def __len__(self): return maxint # cannot return long here def __getitem__(self, key): return key x = GetItem() self.assertEqual(x[self.pos], self.pos) self.assertEqual(x[self.neg], self.neg) self.assertEqual(x[self.neg:self.pos].indices(maxsize), (0, maxsize, 1)) self.assertEqual(x[self.neg:self.pos:1].indices(maxsize), (0, maxsize, 1)) def _getslice_helper_deprecated(self, base): class GetItem(base): def __len__(self): return maxint # cannot return long here def __getitem__(self, key): return key def __getslice__(self, i, j): return i, j x = GetItem() self.assertEqual(x[self.pos], self.pos) self.assertEqual(x[self.neg], self.neg) self.assertEqual(x[self.neg:self.pos], (maxint+minsize, maxsize)) self.assertEqual(x[self.neg:self.pos:1].indices(maxsize), (0, maxsize, 1)) def test_getitem(self): self._getitem_helper(object) with test_support.check_py3k_warnings(): self._getslice_helper_deprecated(object) def test_getitem_classic(self): class Empty: pass # XXX This test fails (see bug #7532) #self._getitem_helper(Empty) with test_support.check_py3k_warnings(): self._getslice_helper_deprecated(Empty) def test_sequence_repeat(self): self.assertRaises(OverflowError, lambda: "a" * self.pos) self.assertRaises(OverflowError, lambda: "a" * self.neg) def test_main(): 
test_support.run_unittest( BaseTestCase, ListTestCase, TupleTestCase, ByteArrayTestCase, StringTestCase, UnicodeTestCase, ClassicSeqTestCase, NewSeqTestCase, XRangeTestCase, OverflowTestCase, ) with test_support.check_py3k_warnings(): test_support.run_unittest( ClassicSeqDeprecatedTestCase, NewSeqDeprecatedTestCase, ) if __name__ == "__main__": test_main()
mit
kch8qx/osf.io
scripts/clone_wiki_pages.py
17
2766
""" Create copies of wiki pages for existing forks and registrations instead of using the same NodeWikiPage objects as the original node. """ import logging import sys from modularodm import Q from framework.mongo import database as db from framework.transactions.context import TokuTransaction from website.addons.wiki.model import NodeWikiPage from website.models import Node from website.app import init_app from scripts import utils as script_utils logger = logging.getLogger(__name__) BACKUP_COLLECTION = 'unmigratedwikipages' def main(): nodes = db.node.find({}, {'_id': True, 'wiki_pages_versions': True, 'wiki_pages_current': True}) nodes = nodes.batch_size(200) update_wiki_pages(nodes) def update_wiki_pages(nodes): for i, node in enumerate(nodes): if node['wiki_pages_versions']: cloned_wiki_pages = {} for key, wiki_versions in node['wiki_pages_versions'].items(): cloned_wiki_pages[key] = [] for wiki_id in wiki_versions: node_wiki = NodeWikiPage.load(wiki_id) if not node_wiki: continue if node_wiki.to_storage()['node'] != node['_id']: if not node_wiki.node: move_to_backup_collection(node_wiki) continue clone = node_wiki.clone_wiki(node['_id']) logger.info('Cloned wiki page {} from node {} to {}'.format(wiki_id, node_wiki.node, node['_id'])) cloned_wiki_pages[key].append(clone._id) # update current wiki page if node_wiki.is_current: wiki_pages_current = node['wiki_pages_current'] wiki_pages_current[key] = clone._id db.node.update({'_id': node['_id']}, {'$set': {'wiki_pages_current': wiki_pages_current}}) else: cloned_wiki_pages[key].append(wiki_id) db.node.update({'_id': node['_id']}, {'$set': {'wiki_pages_versions': cloned_wiki_pages}}) # Wiki pages with nodes that no longer exist are removed from NodeWikiPage # and put into a separate collection def move_to_backup_collection(node_wiki_page): db[BACKUP_COLLECTION].insert(node_wiki_page.to_storage()) NodeWikiPage.remove_one(Q('_id', 'eq', node_wiki_page._id)) if __name__ == '__main__': dry = '--dry' in sys.argv if 
not dry: script_utils.add_file_logger(logger, __file__) init_app(routes=False, set_backends=True) with TokuTransaction(): main() if dry: raise Exception('Dry Run -- Aborting Transaction')
apache-2.0
hakanozadam/bal
bal/core/step.py
1
10734
# AUTHORS: # Hakan Ozadam # Rachel Brown # # Moore Laboratory # UMASS Medical School / HHMI # RNA Therapeutics Institute # Albert Sherman Center, ASC4-1009 # 368 Plantation Street # Worcester, MA 01605 # USA # ################################################################# from abc import abstractmethod, ABCMeta import subprocess import os import datetime from .exceptions import * ################################################################# ################################################################# class Step(metaclass = ABCMeta): ##################################################################### def __init__(self, name, input_files, output_directory, executable = '' , executable_arguments = ''): self.name = name if not name: raise StepError("No name given!") self.input_files = input_files self.output_directory = os.path.abspath(output_directory) self.log_contents = list() self.log_file = os.path.join(self.output_directory, self.name + ".bal.log") self.success_file = os.path.join(self.output_directory, "success.bal.log") self.failure_file = os.path.join(self.output_directory, "failure.bal.log") self.executable = executable self.executable_arguments = executable_arguments self.command = '' self.module = '' self.error_messages = list() ####################################################################### def __del__(self): pass ####################################################################### @abstractmethod def prepare(self): pass ######################################################################## # Since this function wil be overridden if there is a module run # we set it to fail to remind ours.elves it must be overridden in case of a module run. 
def _module_run(self): subprocess.call('touch ' + self.failure_file , shell=True ) raise(StepError("You have to override the method _module_run.")) ######################################################################### def _executable_run(self): p = subprocess.Popen([self.command + self.executable_arguments], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell = True) std_out , std_err = p.communicate() self.std_out = std_out.decode("utf-8").rstrip() self.std_err = std_err.decode("utf-8").rstrip() self.returncode = p.returncode ######################################################################### def _only_run(self): if self.command: return self._executable_run() else: return self._module_run() ######################################################################### def run(self): if os.path.exists(self.success_file): os.remove(self.success_file) if os.path.exists(self.failure_file): os.remove(self.failure_file) os.makedirs(self.output_directory , exist_ok = True) self.start_time = datetime.datetime.now() self.prepare() self._only_run() self.post_run() self.end_time = datetime.datetime.now() self.write_log() ########################################################################## def post_run(self): if self.command: if self.returncode: subprocess.call('touch ' + self.failure_file , shell=True ) else: subprocess.call('touch ' + self.success_file , shell=True ) ########################################################################## @property def did_run(self): if self.did_fail or self.did_success: return True else: return False ########################################################################## @property def did_fail(self): if os.path.isfile(self.failure_file): return True else: return False ########################################################################## @property def did_success(self): if os.path.isfile(self.success_file): return True else: return False ########################################################################### 
@property def input_files(self): if hasattr(self, '_input_files'): return self._input_files else: return list() ########################################################################### @input_files.setter def input_files(self, files): self._input_files = list() missing_files = list() for file in files: file_path = os.path.abspath(file) if not os.path.isfile(file_path): missing_files.append(file_path) self._input_files.append(file_path) if len(missing_files) > 0: raise(InputError("Error: The following files couldn't be found\n{files}".\ format(files = "\n".join(missing_files)))) ########################################################################### def cleanup(self): pass ############################################################################ @property def report(self): pass ############################################################################# @property def running_time(self): return int( (self.end_time - self.start_time).seconds ) ############################################################################# @property def executable(self): if hasattr(self, '_executable'): return self._executable else: return '' ############################################################################# @executable.setter def executable(self, executable): if executable == '': self._executable = '' return p = subprocess.Popen(["which " + executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell = True) p.communicate() if p.returncode: subprocess.call('touch ' + self.failure_file , shell=True ) raise ExecutableError("Error: Couldn't find the executable {executable}.".format(executable = executable)) else: self._executable = executable ############################################################################# def write_log(self): time_format = "%m %d %Y , %H:%M:%S" run_result = 'Did not run' if self.did_success: run_result = 'Success' elif self.did_fail: run_result = 'Fail' with open(self.log_file, 'w') as log_stream: print("Step Name: " + self.name, 
file = log_stream) print("Result: ", run_result, file = log_stream ) print("Start Time: " + str(self.start_time.strftime(time_format)) , file = log_stream) print("End Time: " + str(self.end_time.strftime(time_format)) , file = log_stream ) print("Running Time: " + str( self.running_time ) + ' seconds' , file = log_stream) if(self.command): print("Command: " + self.command, file = log_stream) print("Return Code: " + str(self.returncode), file = log_stream) print("Std Out:\n" + self.std_out, file = log_stream) print("Std Error:\n" + self.std_err, file = log_stream) else: print("Module: " + self.module, file = log_stream ) if( len(self.log_contents) > 0 ): print('-------------------', file = log_stream) for log_entry in self.log_contents: print( log_entry, file = log_stream) if len(self.error_messages) > 0 : error_message = "\n".join(self.error_messages) print('-------------------', file = log_stream) print('Error Message:', file = log_stream) print(error_message, file = log_stream) ############################################################################# def file_list_existence_check(file_list): missing_files = list() for f in file_list: if not os.path.exists(f): missing_files.append(f) return missing_files ############################################################################## def run_parallel_subprocess(self, n): active_processes = list() if len(self.commands) <= n: for c in self.commands: active_processes.append(subprocess.Popen([c + self.executable_arguments], stdout= subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell = True)) else: for c in self.commands[0:n]: active_processes.append(subprocess.Popen([c + self.executable_arguments], stdout= subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell = True)) for c in self.commands[n:]: process_removed = False while not process_removed: for p in active_processes: if p.poll() is not None: p.stdout.close() process_removed = True active_processes.remove(p) 
active_processes.append(subprocess.Popen([c + self.executable_arguments], stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds=True, shell = True)) break #### Wait for the remaining processes while active_processes: for p in active_processes: if p.poll() is not None: p.stdout.close() active_processes.remove(p) ###################################################################################
gpl-2.0
Endika/c2c-rd-addons
c2c_budget_report/__init__.py
4
1415
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) Camptocamp SA - http://www.camptocamp.com # Author: Arnaud WÃŒst ported by nbessi # # This file is part of the c2c_budget module # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import wizard import c2c_budget_item import report import c2c_budget_sequence
agpl-3.0
HSAnet/glimpse_client
3rdparty/breakpad/src/tools/gyp/test/defines-escaping/gyptest-defines-escaping.py
350
4737
#!/usr/bin/env python # Copyright (c) 2010 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies build of an executable with C++ define specified by a gyp define using various special characters such as quotes, commas, etc. """ import os import TestGyp test = TestGyp.TestGyp() # Tests string literals, percents, and backslash escapes. try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n' """ r"""test_args='"Simple test of %s with a literal"'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.build('defines-escaping.gyp') expect = """ Simple test of %s with a literal """ test.run_built_executable('defines_escaping', stdout=expect) # Test multiple comma-and-space-separated string literals. try: os.environ['GYP_DEFINES'] = \ r"""test_format='\n%s and %s\n' test_args='"foo", "bar"'""" test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ foo and bar """ test.run_built_executable('defines_escaping', stdout=expect) # Test string literals containing quotes. try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s %s %s %s %s\n' """ r"""test_args='"\"These,\"",""" r""" "\"words,\"",""" r""" "\"are,\"",""" r""" "\"in,\"",""" r""" "\"quotes.\""'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ "These," "words," "are," "in," "quotes." """ test.run_built_executable('defines_escaping', stdout=expect) # Test string literals containing single quotes. 
try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s %s %s %s %s\n' """ r"""test_args="\"'These,'\",""" r""" \"'words,'\",""" r""" \"'are,'\",""" r""" \"'in,'\",""" r""" \"'quotes.'\"" """) test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ 'These,' 'words,' 'are,' 'in,' 'quotes.' """ test.run_built_executable('defines_escaping', stdout=expect) # Test string literals containing different numbers of backslashes before quotes # (to exercise Windows' quoting behaviour). try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n%s\n%s\n' """ r"""test_args='"\\\"1 visible slash\\\"",""" r""" "\\\\\"2 visible slashes\\\\\"",""" r""" "\\\\\\\"3 visible slashes\\\\\\\""'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = r""" \"1 visible slash\" \\"2 visible slashes\\" \\\"3 visible slashes\\\" """ test.run_built_executable('defines_escaping', stdout=expect) # Test that various scary sequences are passed unfettered. 
try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n' """ r"""test_args='"$foo, &quot; `foo`;"'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ $foo, &quot; `foo`; """ test.run_built_executable('defines_escaping', stdout=expect) # VisualStudio 2010 can't handle passing %PATH% if not (test.format == 'msvs' and test.uses_msbuild): try: os.environ['GYP_DEFINES'] = ( """test_format='%s' """ """test_args='"%PATH%"'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = "%PATH%" test.run_built_executable('defines_escaping', stdout=expect) # Test commas and semi-colons preceded by backslashes (to exercise Windows' # quoting behaviour). try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n%s\n' """ r"""test_args='"\\, \\\\;",""" # Same thing again, but enclosed in visible quotes. r""" "\"\\, \\\\;\""'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = r""" \, \\; "\, \\;" """ test.run_built_executable('defines_escaping', stdout=expect) # We deliberately do not test having an odd number of quotes in a string # literal because that isn't feasible in MSVS. test.pass_test()
bsd-3-clause
larsbergstrom/servo
tests/wpt/web-platform-tests/tools/third_party/funcsigs/funcsigs/__init__.py
48
30011
# Copyright 2001-2013 Python Software Foundation; All Rights Reserved """Function signature objects for callables Back port of Python 3.3's function signature tools from the inspect module, modified to be compatible with Python 2.6, 2.7 and 3.2+. """ from __future__ import absolute_import, division, print_function import itertools import functools import re import types try: from collections import OrderedDict except ImportError: from funcsigs.odict import OrderedDict from funcsigs.version import __version__ __all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature'] _WrapperDescriptor = type(type.__call__) _MethodWrapper = type(all.__call__) _NonUserDefinedCallables = (_WrapperDescriptor, _MethodWrapper, types.BuiltinFunctionType) def formatannotation(annotation, base_module=None): if isinstance(annotation, type): if annotation.__module__ in ('builtins', '__builtin__', base_module): return annotation.__name__ return annotation.__module__+'.'+annotation.__name__ return repr(annotation) def _get_user_defined_method(cls, method_name, *nested): try: if cls is type: return meth = getattr(cls, method_name) for name in nested: meth = getattr(meth, name, meth) except AttributeError: return else: if not isinstance(meth, _NonUserDefinedCallables): # Once '__signature__' will be added to 'C'-level # callables, this check won't be necessary return meth def signature(obj): '''Get a signature object for the passed callable.''' if not callable(obj): raise TypeError('{0!r} is not a callable object'.format(obj)) if isinstance(obj, types.MethodType): sig = signature(obj.__func__) if obj.__self__ is None: # Unbound method: the first parameter becomes positional-only if sig.parameters: first = sig.parameters.values()[0].replace( kind=_POSITIONAL_ONLY) return sig.replace( parameters=(first,) + tuple(sig.parameters.values())[1:]) else: return sig else: # In this case we skip the first parameter of the underlying # function (usually `self` or `cls`). 
return sig.replace(parameters=tuple(sig.parameters.values())[1:]) try: sig = obj.__signature__ except AttributeError: pass else: if sig is not None: return sig try: # Was this function wrapped by a decorator? wrapped = obj.__wrapped__ except AttributeError: pass else: return signature(wrapped) if isinstance(obj, types.FunctionType): return Signature.from_function(obj) if isinstance(obj, functools.partial): sig = signature(obj.func) new_params = OrderedDict(sig.parameters.items()) partial_args = obj.args or () partial_keywords = obj.keywords or {} try: ba = sig.bind_partial(*partial_args, **partial_keywords) except TypeError as ex: msg = 'partial object {0!r} has incorrect arguments'.format(obj) raise ValueError(msg) for arg_name, arg_value in ba.arguments.items(): param = new_params[arg_name] if arg_name in partial_keywords: # We set a new default value, because the following code # is correct: # # >>> def foo(a): print(a) # >>> print(partial(partial(foo, a=10), a=20)()) # 20 # >>> print(partial(partial(foo, a=10), a=20)(a=30)) # 30 # # So, with 'partial' objects, passing a keyword argument is # like setting a new default value for the corresponding # parameter # # We also mark this parameter with '_partial_kwarg' # flag. Later, in '_bind', the 'default' value of this # parameter will be added to 'kwargs', to simulate # the 'functools.partial' real call. 
new_params[arg_name] = param.replace(default=arg_value, _partial_kwarg=True) elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and not param._partial_kwarg): new_params.pop(arg_name) return sig.replace(parameters=new_params.values()) sig = None if isinstance(obj, type): # obj is a class or a metaclass # First, let's see if it has an overloaded __call__ defined # in its metaclass call = _get_user_defined_method(type(obj), '__call__') if call is not None: sig = signature(call) else: # Now we check if the 'obj' class has a '__new__' method new = _get_user_defined_method(obj, '__new__') if new is not None: sig = signature(new) else: # Finally, we should have at least __init__ implemented init = _get_user_defined_method(obj, '__init__') if init is not None: sig = signature(init) elif not isinstance(obj, _NonUserDefinedCallables): # An object with __call__ # We also check that the 'obj' is not an instance of # _WrapperDescriptor or _MethodWrapper to avoid # infinite recursion (and even potential segfault) call = _get_user_defined_method(type(obj), '__call__', 'im_func') if call is not None: sig = signature(call) if sig is not None: # For classes and objects we skip the first parameter of their # __call__, __new__, or __init__ methods return sig.replace(parameters=tuple(sig.parameters.values())[1:]) if isinstance(obj, types.BuiltinFunctionType): # Raise a nicer error message for builtins msg = 'no signature found for builtin function {0!r}'.format(obj) raise ValueError(msg) raise ValueError('callable {0!r} is not supported by signature'.format(obj)) class _void(object): '''A private marker - used in Parameter & Signature''' class _empty(object): pass class _ParameterKind(int): def __new__(self, *args, **kwargs): obj = int.__new__(self, *args) obj._name = kwargs['name'] return obj def __str__(self): return self._name def __repr__(self): return '<_ParameterKind: {0!r}>'.format(self._name) _POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY') 
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD') _VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL') _KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY') _VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD') class Parameter(object): '''Represents a parameter in a function signature. Has the following public attributes: * name : str The name of the parameter as a string. * default : object The default value for the parameter if specified. If the parameter has no default value, this attribute is not set. * annotation The annotation for the parameter if specified. If the parameter has no annotation, this attribute is not set. * kind : str Describes how argument values are bound to the parameter. Possible values: `Parameter.POSITIONAL_ONLY`, `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. ''' __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg') POSITIONAL_ONLY = _POSITIONAL_ONLY POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD VAR_POSITIONAL = _VAR_POSITIONAL KEYWORD_ONLY = _KEYWORD_ONLY VAR_KEYWORD = _VAR_KEYWORD empty = _empty def __init__(self, name, kind, default=_empty, annotation=_empty, _partial_kwarg=False): if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD, _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD): raise ValueError("invalid value for 'Parameter.kind' attribute") self._kind = kind if default is not _empty: if kind in (_VAR_POSITIONAL, _VAR_KEYWORD): msg = '{0} parameters cannot have default values'.format(kind) raise ValueError(msg) self._default = default self._annotation = annotation if name is None: if kind != _POSITIONAL_ONLY: raise ValueError("None is not a valid name for a " "non-positional-only parameter") self._name = name else: name = str(name) if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I): msg = '{0!r} is not a valid parameter name'.format(name) raise ValueError(msg) self._name = name 
self._partial_kwarg = _partial_kwarg @property def name(self): return self._name @property def default(self): return self._default @property def annotation(self): return self._annotation @property def kind(self): return self._kind def replace(self, name=_void, kind=_void, annotation=_void, default=_void, _partial_kwarg=_void): '''Creates a customized copy of the Parameter.''' if name is _void: name = self._name if kind is _void: kind = self._kind if annotation is _void: annotation = self._annotation if default is _void: default = self._default if _partial_kwarg is _void: _partial_kwarg = self._partial_kwarg return type(self)(name, kind, default=default, annotation=annotation, _partial_kwarg=_partial_kwarg) def __str__(self): kind = self.kind formatted = self._name if kind == _POSITIONAL_ONLY: if formatted is None: formatted = '' formatted = '<{0}>'.format(formatted) # Add annotation and default value if self._annotation is not _empty: formatted = '{0}:{1}'.format(formatted, formatannotation(self._annotation)) if self._default is not _empty: formatted = '{0}={1}'.format(formatted, repr(self._default)) if kind == _VAR_POSITIONAL: formatted = '*' + formatted elif kind == _VAR_KEYWORD: formatted = '**' + formatted return formatted def __repr__(self): return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__, id(self), self.name) def __hash__(self): msg = "unhashable type: '{0}'".format(self.__class__.__name__) raise TypeError(msg) def __eq__(self, other): return (issubclass(other.__class__, Parameter) and self._name == other._name and self._kind == other._kind and self._default == other._default and self._annotation == other._annotation) def __ne__(self, other): return not self.__eq__(other) class BoundArguments(object): '''Result of `Signature.bind` call. Holds the mapping of arguments to the function's parameters. Has the following public attributes: * arguments : OrderedDict An ordered mutable mapping of parameters' names to arguments' values. 
Does not contain arguments' default values. * signature : Signature The Signature object that created this instance. * args : tuple Tuple of positional arguments values. * kwargs : dict Dict of keyword arguments values. ''' def __init__(self, signature, arguments): self.arguments = arguments self._signature = signature @property def signature(self): return self._signature @property def args(self): args = [] for param_name, param in self._signature.parameters.items(): if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or param._partial_kwarg): # Keyword arguments mapped by 'functools.partial' # (Parameter._partial_kwarg is True) are mapped # in 'BoundArguments.kwargs', along with VAR_KEYWORD & # KEYWORD_ONLY break try: arg = self.arguments[param_name] except KeyError: # We're done here. Other arguments # will be mapped in 'BoundArguments.kwargs' break else: if param.kind == _VAR_POSITIONAL: # *args args.extend(arg) else: # plain argument args.append(arg) return tuple(args) @property def kwargs(self): kwargs = {} kwargs_started = False for param_name, param in self._signature.parameters.items(): if not kwargs_started: if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or param._partial_kwarg): kwargs_started = True else: if param_name not in self.arguments: kwargs_started = True continue if not kwargs_started: continue try: arg = self.arguments[param_name] except KeyError: pass else: if param.kind == _VAR_KEYWORD: # **kwargs kwargs.update(arg) else: # plain keyword argument kwargs[param_name] = arg return kwargs def __hash__(self): msg = "unhashable type: '{0}'".format(self.__class__.__name__) raise TypeError(msg) def __eq__(self, other): return (issubclass(other.__class__, BoundArguments) and self.signature == other.signature and self.arguments == other.arguments) def __ne__(self, other): return not self.__eq__(other) class Signature(object): '''A Signature object represents the overall signature of a function. 
It stores a Parameter object for each parameter accepted by the function, as well as information specific to the function itself. A Signature object has the following public attributes and methods: * parameters : OrderedDict An ordered mapping of parameters' names to the corresponding Parameter objects (keyword-only arguments are in the same order as listed in `code.co_varnames`). * return_annotation : object The annotation for the return type of the function if specified. If the function has no annotation for its return type, this attribute is not set. * bind(*args, **kwargs) -> BoundArguments Creates a mapping from positional and keyword arguments to parameters. * bind_partial(*args, **kwargs) -> BoundArguments Creates a partial mapping from positional and keyword arguments to parameters (simulating 'functools.partial' behavior.) ''' __slots__ = ('_return_annotation', '_parameters') _parameter_cls = Parameter _bound_arguments_cls = BoundArguments empty = _empty def __init__(self, parameters=None, return_annotation=_empty, __validate_parameters__=True): '''Constructs Signature from the given list of Parameter objects and 'return_annotation'. All arguments are optional. 
''' if parameters is None: params = OrderedDict() else: if __validate_parameters__: params = OrderedDict() top_kind = _POSITIONAL_ONLY for idx, param in enumerate(parameters): kind = param.kind if kind < top_kind: msg = 'wrong parameter order: {0} before {1}' msg = msg.format(top_kind, param.kind) raise ValueError(msg) else: top_kind = kind name = param.name if name is None: name = str(idx) param = param.replace(name=name) if name in params: msg = 'duplicate parameter name: {0!r}'.format(name) raise ValueError(msg) params[name] = param else: params = OrderedDict(((param.name, param) for param in parameters)) self._parameters = params self._return_annotation = return_annotation @classmethod def from_function(cls, func): '''Constructs Signature for the given python function''' if not isinstance(func, types.FunctionType): raise TypeError('{0!r} is not a Python function'.format(func)) Parameter = cls._parameter_cls # Parameter information. func_code = func.__code__ pos_count = func_code.co_argcount arg_names = func_code.co_varnames positional = tuple(arg_names[:pos_count]) keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0) keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)] annotations = getattr(func, '__annotations__', {}) defaults = func.__defaults__ kwdefaults = getattr(func, '__kwdefaults__', None) if defaults: pos_default_count = len(defaults) else: pos_default_count = 0 parameters = [] # Non-keyword-only parameters w/o defaults. non_default_count = pos_count - pos_default_count for name in positional[:non_default_count]: annotation = annotations.get(name, _empty) parameters.append(Parameter(name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD)) # ... w/ defaults. 
for offset, name in enumerate(positional[non_default_count:]): annotation = annotations.get(name, _empty) parameters.append(Parameter(name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD, default=defaults[offset])) # *args if func_code.co_flags & 0x04: name = arg_names[pos_count + keyword_only_count] annotation = annotations.get(name, _empty) parameters.append(Parameter(name, annotation=annotation, kind=_VAR_POSITIONAL)) # Keyword-only parameters. for name in keyword_only: default = _empty if kwdefaults is not None: default = kwdefaults.get(name, _empty) annotation = annotations.get(name, _empty) parameters.append(Parameter(name, annotation=annotation, kind=_KEYWORD_ONLY, default=default)) # **kwargs if func_code.co_flags & 0x08: index = pos_count + keyword_only_count if func_code.co_flags & 0x04: index += 1 name = arg_names[index] annotation = annotations.get(name, _empty) parameters.append(Parameter(name, annotation=annotation, kind=_VAR_KEYWORD)) return cls(parameters, return_annotation=annotations.get('return', _empty), __validate_parameters__=False) @property def parameters(self): try: return types.MappingProxyType(self._parameters) except AttributeError: return OrderedDict(self._parameters.items()) @property def return_annotation(self): return self._return_annotation def replace(self, parameters=_void, return_annotation=_void): '''Creates a customized copy of the Signature. Pass 'parameters' and/or 'return_annotation' arguments to override them in the new copy. 
''' if parameters is _void: parameters = self.parameters.values() if return_annotation is _void: return_annotation = self._return_annotation return type(self)(parameters, return_annotation=return_annotation) def __hash__(self): msg = "unhashable type: '{0}'".format(self.__class__.__name__) raise TypeError(msg) def __eq__(self, other): if (not issubclass(type(other), Signature) or self.return_annotation != other.return_annotation or len(self.parameters) != len(other.parameters)): return False other_positions = dict((param, idx) for idx, param in enumerate(other.parameters.keys())) for idx, (param_name, param) in enumerate(self.parameters.items()): if param.kind == _KEYWORD_ONLY: try: other_param = other.parameters[param_name] except KeyError: return False else: if param != other_param: return False else: try: other_idx = other_positions[param_name] except KeyError: return False else: if (idx != other_idx or param != other.parameters[param_name]): return False return True def __ne__(self, other): return not self.__eq__(other) def _bind(self, args, kwargs, partial=False): '''Private method. Don't use directly.''' arguments = OrderedDict() parameters = iter(self.parameters.values()) parameters_ex = () arg_vals = iter(args) if partial: # Support for binding arguments to 'functools.partial' objects. # See 'functools.partial' case in 'signature()' implementation # for details. for param_name, param in self.parameters.items(): if (param._partial_kwarg and param_name not in kwargs): # Simulating 'functools.partial' behavior kwargs[param_name] = param.default while True: # Let's iterate through the positional arguments and corresponding # parameters try: arg_val = next(arg_vals) except StopIteration: # No more positional arguments try: param = next(parameters) except StopIteration: # No more parameters. That's it. Just need to check that # we have no `kwargs` after this while loop break else: if param.kind == _VAR_POSITIONAL: # That's OK, just empty *args. 
Let's start parsing # kwargs break elif param.name in kwargs: if param.kind == _POSITIONAL_ONLY: msg = '{arg!r} parameter is positional only, ' \ 'but was passed as a keyword' msg = msg.format(arg=param.name) raise TypeError(msg) parameters_ex = (param,) break elif (param.kind == _VAR_KEYWORD or param.default is not _empty): # That's fine too - we have a default value for this # parameter. So, lets start parsing `kwargs`, starting # with the current parameter parameters_ex = (param,) break else: if partial: parameters_ex = (param,) break else: msg = '{arg!r} parameter lacking default value' msg = msg.format(arg=param.name) raise TypeError(msg) else: # We have a positional argument to process try: param = next(parameters) except StopIteration: raise TypeError('too many positional arguments') else: if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): # Looks like we have no parameter for this positional # argument raise TypeError('too many positional arguments') if param.kind == _VAR_POSITIONAL: # We have an '*args'-like argument, let's fill it with # all positional arguments we have left and move on to # the next phase values = [arg_val] values.extend(arg_vals) arguments[param.name] = tuple(values) break if param.name in kwargs: raise TypeError('multiple values for argument ' '{arg!r}'.format(arg=param.name)) arguments[param.name] = arg_val # Now, we iterate through the remaining parameters to process # keyword arguments kwargs_param = None for param in itertools.chain(parameters_ex, parameters): if param.kind == _POSITIONAL_ONLY: # This should never happen in case of a properly built # Signature object (but let's have this check here # to ensure correct behaviour just in case) raise TypeError('{arg!r} parameter is positional only, ' 'but was passed as a keyword'. 
\ format(arg=param.name)) if param.kind == _VAR_KEYWORD: # Memorize that we have a '**kwargs'-like parameter kwargs_param = param continue param_name = param.name try: arg_val = kwargs.pop(param_name) except KeyError: # We have no value for this parameter. It's fine though, # if it has a default value, or it is an '*args'-like # parameter, left alone by the processing of positional # arguments. if (not partial and param.kind != _VAR_POSITIONAL and param.default is _empty): raise TypeError('{arg!r} parameter lacking default value'. \ format(arg=param_name)) else: arguments[param_name] = arg_val if kwargs: if kwargs_param is not None: # Process our '**kwargs'-like parameter arguments[kwargs_param.name] = kwargs else: raise TypeError('too many keyword arguments') return self._bound_arguments_cls(self, arguments) def bind(self, *args, **kwargs): '''Get a BoundArguments object, that maps the passed `args` and `kwargs` to the function's signature. Raises `TypeError` if the passed arguments can not be bound. ''' return self._bind(args, kwargs) def bind_partial(self, *args, **kwargs): '''Get a BoundArguments object, that partially maps the passed `args` and `kwargs` to the function's signature. Raises `TypeError` if the passed arguments can not be bound. 
''' return self._bind(args, kwargs, partial=True) def __str__(self): result = [] render_kw_only_separator = True for idx, param in enumerate(self.parameters.values()): formatted = str(param) kind = param.kind if kind == _VAR_POSITIONAL: # OK, we have an '*args'-like parameter, so we won't need # a '*' to separate keyword-only arguments render_kw_only_separator = False elif kind == _KEYWORD_ONLY and render_kw_only_separator: # We have a keyword-only parameter to render and we haven't # rendered an '*args'-like parameter before, so add a '*' # separator to the parameters list ("foo(arg1, *, arg2)" case) result.append('*') # This condition should be only triggered once, so # reset the flag render_kw_only_separator = False result.append(formatted) rendered = '({0})'.format(', '.join(result)) if self.return_annotation is not _empty: anno = formatannotation(self.return_annotation) rendered += ' -> {0}'.format(anno) return rendered
mpl-2.0
msscully/datamart
datamart/secure_redirect.py
1
1259
""" Takend from http://flask.pocoo.org/snippets/63/ """ from urlparse import urlparse, urljoin from flask import request, url_for, redirect from flask.ext.wtf import Form from wtforms import HiddenField def is_safe_url(target): ref_url = urlparse(request.host_url) test_url = urlparse(urljoin(request.host_url, target)) return test_url.scheme in ('http', 'https') and \ ref_url.netloc == test_url.netloc def get_redirect_target(): for target in request.values.get('next'), request.referrer: if not target: continue if is_safe_url(target): return target def redirect_back(target, endpoint, **values): if not target or not is_safe_url(target): target = url_for(endpoint, **values) return redirect(target) class RedirectForm(Form): next = HiddenField() def __init__(self, *args, **kwargs): Form.__init__(self, *args, **kwargs) if not self.next.data: self.next.data = get_redirect_target() or '' def redirect(self, endpoint='index', **values): if is_safe_url(self.next.data): return redirect(self.next.data) target = get_redirect_target() return redirect(target or url_for(endpoint, **values))
mit
wd15/corr
corr-db/corrdb/common/models/profile_model.py
1
1739
import datetime
import json

from bson import ObjectId

from ..core import db
from ..models import UserModel
from ..models import FileModel


class ProfileModel(db.Document):
    """A user's public profile: name, picture, organisation and bio.

    Serialization helpers return plain dicts/JSON suitable for API responses.
    """

    # NOTE: the default must be a callable.  The original passed
    # str(datetime.datetime.utcnow()) directly, which is evaluated once at
    # import time and freezes the same timestamp into every document.
    created_at = db.StringField(default=lambda: str(datetime.datetime.utcnow()))
    # Owning user; deleting the user cascades to the profile.
    user = db.ReferenceField(UserModel, reverse_delete_rule=db.CASCADE, required=True)
    fname = db.StringField(required=True)
    lname = db.StringField(required=True)
    picture = db.ReferenceField(FileModel)
    organisation = db.StringField()
    about = db.StringField()
    extend = db.DictField()

    def clone(self):
        """Turn this in-memory document into a copy with a fresh ObjectId.

        Clears mongoengine's internal bookkeeping so the next save() inserts
        a new record instead of updating the original.
        """
        # pop(..., None) instead of del: these internal keys are not
        # guaranteed to exist on every instance, and del would raise KeyError.
        self.__dict__.pop('_id', None)
        self.__dict__.pop('_created', None)
        self.__dict__.pop('_changed_fields', None)
        self.id = ObjectId()

    def info(self):
        """Return the core profile fields as a plain dict of strings."""
        data = {'created': str(self.created_at),
                'id': str(self.id),
                'user': str(self.user.id),
                'fname': self.fname,
                'lname': self.lname}
        data['picture'] = str(self.picture.id) if self.picture is not None else None
        return data

    def extended(self):
        """Return info() plus organisation, about and the free-form extend dict."""
        data = self.info()
        data['organisation'] = self.organisation
        data['about'] = self.about
        data['extend'] = self.extend
        return data

    def to_json(self):
        """Serialize the full profile as pretty-printed JSON."""
        return json.dumps(self.extended(), sort_keys=True, indent=4,
                          separators=(',', ': '))

    def summary_json(self):
        """Serialize a short profile; 'about' is truncated past 100 characters."""
        data = self.info()
        data['organisation'] = self.organisation
        if self.about is not None:
            data['about'] = (self.about[0:96] + "..."
                             if len(self.about) >= 100 else self.about)
        else:
            data['about'] = None
        return json.dumps(data, sort_keys=True, indent=4,
                          separators=(',', ': '))
mit
Chemcy/vnpy
vn.trader/ctaStrategy/datayesClient.py
14
2853
# encoding: UTF-8

"""A simple DataYes (Tonglian data) client built on requests.

Leaner than the official Python example on the DataYes website.
"""

import os
import json

import requests

FILENAME = 'datayes.json'

HTTP_OK = 200


########################################################################
class DatayesClient(object):
    """DataYes API client: loads credentials from datayes.json and fetches data."""

    name = u'通联数据客户端'

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.domain = ''                # API root domain
        self.version = ''               # API version
        self.token = ''                 # authorization token
        self.header = {}                # HTTP request headers
        self.settingLoaded = False      # whether the config has been read

        self.loadSetting()

    #----------------------------------------------------------------------
    def loadSetting(self):
        """Load connection settings from datayes.json next to this module."""
        # Resolve the config path into a *local* name.  The original code
        # assigned to the module-level name FILENAME inside this function,
        # which made FILENAME a local variable and raised UnboundLocalError
        # when it was read on the right-hand side first.  It also used the
        # Python-2-only file() builtin and never closed the handle.
        settingFileName = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), FILENAME)

        try:
            f = open(settingFileName)
        except IOError:
            print(u'%s无法打开配置文件' % self.name)
            return

        with f:
            setting = json.load(f)

        try:
            self.domain = str(setting['domain'])
            self.version = str(setting['version'])
            self.token = str(setting['token'])
        except KeyError:
            print(u'%s配置文件字段缺失' % self.name)
            return

        self.header['Connection'] = 'keep_alive'
        self.header['Authorization'] = 'Bearer ' + self.token
        self.settingLoaded = True
        print(u'%s配置载入完成' % self.name)

    #----------------------------------------------------------------------
    def downloadData(self, path, params):
        """GET <domain>/<version>/<path> and return the 'data' payload, or None."""
        if not self.settingLoaded:
            print(u'%s配置未载入' % self.name)
            return None

        url = '/'.join([self.domain, self.version, path])
        r = requests.get(url=url, headers=self.header, params=params)

        if r.status_code != HTTP_OK:
            print(u'%shttp请求失败,状态代码%s' % (self.name, r.status_code))
            return None

        result = r.json()
        if 'retMsg' in result and result['retMsg'] == 'Success':
            return result['data']

        if 'retMsg' in result:
            print(u'%s查询失败,返回信息%s' % (self.name, result['retMsg']))
        elif 'message' in result:
            print(u'%s查询失败,返回信息%s' % (self.name, result['message']))
        return None
mit
bestwpw/mysql-5.6
xtrabackup/test/python/BytesIO.py
31
3807
# http://wiki.python.org/moin/BytesIO
#
# A skeleton one used for systems that don't have BytesIO.
#
# It's enough for subunit at least....

class BytesIO(object):
    r"""A file-like API for reading and writing bytes objects.

    Mostly like StringIO, but write() calls modify the underlying
    (mutable) buffer in place rather than an internal copy.

    >>> buf = bytearray()
    >>> f = BytesIO(buf, 'w')
    >>> f.write(b'\xca\xfe\xba\xbe')
    >>> f.write(b'\x57\x41\x56\x45')
    >>> bytes(buf) == b'\xca\xfe\xba\xbe\x57\x41\x56\x45'
    True
    """

    def __init__(self, buf, mode='r'):
        """
        Create a new BytesIO for reading or writing the given buffer.

        buf - Back-end buffer for this BytesIO.  Anything that supports
            len(), slicing, slice-assignment, and += will work (bytearray
            is the usual choice; 'w'/'a' modes require a mutable buffer).
        mode - One of 'r', 'w', 'a'.  An optional trailing 'b' is also
            allowed, but it doesn't do anything.
        """
        # XXX many 'mode' possibilities aren't allowed yet: 'rw+Ut'
        if len(mode) == 2 and mode[-1] == 'b':
            mode = mode[:-1]  # binary mode goes without saying
        if mode not in ('r', 'w', 'a'):
            raise ValueError("mode must be 'r', 'w', or 'a'")

        self._buf = buf
        self.mode = mode
        self.closed = False
        if self.mode == 'w':
            del buf[:]
            self._point = 0
        elif self.mode == 'r':
            self._point = 0
        else:  # 'a'
            self._point = len(buf)

    def close(self):
        self.closed = True

    def _check_closed(self):
        if self.closed:
            raise ValueError("file is closed")

    def flush(self):
        self._check_closed()

    def next(self):
        """Python-2 iterator protocol; aliased to __next__ for Python 3."""
        line = self.readline()
        if len(line) == 0:
            raise StopIteration
        return line

    __next__ = next

    def read(self, size=None):
        self._check_closed()
        if size is None:
            e = len(self._buf)
        else:
            e = min(self._point + size, len(self._buf))
        r = self._buf[self._point:e]
        self._point = e
        return r

    def readline(self, size=None):
        """Read up to and including the next newline (was an unimplemented stub)."""
        self._check_closed()
        end = len(self._buf)
        if size is not None and size >= 0:
            end = min(end, self._point + size)
        # Scan byte-by-byte via 1-slices so this works for any buffer type
        # (bytearray slices compare equal to bytes in both Python 2 and 3).
        i = self._point
        while i < end and self._buf[i:i + 1] != b'\n':
            i += 1
        if i < end:
            i += 1  # include the newline in the returned line
        line = self._buf[self._point:i]
        self._point = i
        return line

    def readlines(self, sizehint=None):
        # XXX TODO handle sizehint
        return list(self)

    def seek(self, offset, whence=0):
        self._check_closed()
        if whence == 0:
            self._point = offset
        elif whence == 1:
            self._point += offset
        elif whence == 2:
            self._point = len(self._buf) + offset
        else:
            raise ValueError("whence must be 0, 1, or 2")

        if self._point < 0:
            self._point = 0  # XXX is this right?

    def tell(self):
        self._check_closed()
        return self._point

    def truncate(self, size=None):
        self._check_closed()
        if size is None:
            size = self.tell()
        del self._buf[size:]

    def write(self, data):
        self._check_closed()
        amt = len(data)
        size = len(self._buf)
        if self.mode == 'a':
            self._point = size

        if self._point > size:
            # Writing past the end: zero-fill the gap first.  The original
            # referenced an undefined name 'b' here (NameError); check the
            # actual buffer instead, and accept bytearray as well.
            if isinstance(self._buf, (bytes, bytearray)):
                blank = b'\x00'
            else:
                # Don't know what default value to insert, unfortunately
                raise ValueError("can't write past the end of this object")
            self._buf += blank * (self._point - size) + data
            self._point = len(self._buf)
        else:
            p = self._point
            self._buf[p:p + amt] = data
            self._point = min(p + amt, len(self._buf))

    def writelines(self, seq):
        for line in seq:
            self.write(line)

    def __iter__(self):
        return self

    @property
    def name(self):
        return repr(self)
gpl-2.0
whitmo/ansible-charm
setup.py
1
1123
#!/usr/bin/env python
"""Packaging script for AnsibleCharm."""
import os

from setuptools import find_packages
from setuptools import setup

PROJECT = u'AnsibleCharm'
VERSION = '0.1'
URL = "https://blog.juju.solutions"
AUTHOR = u'Whit Morriss <whit.morriss@canonical.com>'
AUTHOR_EMAIL = u'whit.morriss@canonical.com'
DESC = "Python library for charming with ansible"


def read_file(file_name):
    """Return the text of *file_name*, resolved relative to this script."""
    here = os.path.dirname(__file__)
    with open(os.path.join(here, file_name)) as fp:
        return fp.read()


setup(
    name=PROJECT,
    version=VERSION,
    description=DESC,
    long_description=read_file('README.md'),
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    license=read_file('LICENSE'),
    packages=find_packages(exclude=['examples', 'tests']),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        "path.py",
        "charmhelpers",
    ],
    classifiers=[
        'License :: OSI Approved',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        "Programming Language :: Python",
    ],
)
gpl-3.0
koobonil/Boss2D
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py
79
3122
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.inline import Inline
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class InlineBijectorTest(test.TestCase):
  """Tests correctness of the inline constructed bijector."""

  def testBijector(self):
    # An Inline bijector assembled from exp/log callables should behave
    # identically to the canonical Exp bijector.
    with self.test_session():
      exp = Exp(event_ndims=1)
      inline = Inline(
          forward_fn=math_ops.exp,
          inverse_fn=math_ops.log,
          inverse_log_det_jacobian_fn=(
              lambda y: -math_ops.reduce_sum(  # pylint: disable=g-long-lambda
                  math_ops.log(y), reduction_indices=-1)),
          forward_log_det_jacobian_fn=(
              lambda x: math_ops.reduce_sum(x, reduction_indices=-1)),
          name="exp")

      self.assertEqual(exp.name, inline.name)
      x = [[[1., 2.], [3., 4.], [5., 6.]]]
      y = np.exp(x)
      # Forward/inverse round-trip checked against numpy's exp.
      self.assertAllClose(y, inline.forward(x).eval())
      self.assertAllClose(x, inline.inverse(y).eval())
      # log|det J| of the inverse is -sum(log(y)) over the event dimension,
      # and must equal the negated forward log-det-Jacobian at x = log(y).
      self.assertAllClose(
          -np.sum(np.log(y), axis=-1),
          inline.inverse_log_det_jacobian(y).eval())
      self.assertAllClose(-inline.inverse_log_det_jacobian(y).eval(),
                          inline.forward_log_det_jacobian(x).eval())

  def testShapeGetters(self):
    # Only the shape-transforming callables are supplied here; the bijector
    # appends a trailing unit dimension going forward and strips it inverse.
    with self.test_session():
      bijector = Inline(
          forward_event_shape_tensor_fn=lambda x: array_ops.concat((x, [1]), 0),
          forward_event_shape_fn=lambda x: x.as_list() + [1],
          inverse_event_shape_tensor_fn=lambda x: x[:-1],
          inverse_event_shape_fn=lambda x: x[:-1],
          name="shape_only")
      x = tensor_shape.TensorShape([1, 2, 3])
      y = tensor_shape.TensorShape([1, 2, 3, 1])
      self.assertAllEqual(y, bijector.forward_event_shape(x))
      self.assertAllEqual(
          y.as_list(),
          bijector.forward_event_shape_tensor(x.as_list()).eval())
      self.assertAllEqual(x, bijector.inverse_event_shape(y))
      self.assertAllEqual(
          x.as_list(),
          bijector.inverse_event_shape_tensor(y.as_list()).eval())


if __name__ == "__main__":
  test.main()
mit
exploreodoo/datStruct
odoo/addons/marketing_campaign/__openerp__.py
260
3127
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# Addon manifest for the Marketing Campaigns module.  This file is pure
# declarative data: OpenERP evaluates the dict below at module discovery time.
{
    'name': 'Marketing Campaigns',
    'version': '1.1',
    # Addons that must be installed before this one.
    'depends': ['marketing',
                'document',
                'email_template',
                'decimal_precision'
               ],
    'author': 'OpenERP SA',
    'category': 'Marketing',
    'description': """
This module provides leads automation through marketing campaigns (campaigns can in fact be defined on any resource, not just CRM Leads).
=========================================================================================================================================

The campaigns are dynamic and multi-channels. The process is as follows:
------------------------------------------------------------------------
    * Design marketing campaigns like workflows, including email templates to send, reports to print and send by email, custom actions
    * Define input segments that will select the items that should enter the campaign (e.g leads from certain countries.)
    * Run your campaign in simulation mode to test it real-time or accelerated, and fine-tune it
    * You may also start the real campaign in manual mode, where each action requires manual validation
    * Finally launch your campaign live, and watch the statistics as the campaign does everything fully automatically.

While the campaign runs you can of course continue to fine-tune the parameters, input segments, workflow.

**Note:** If you need demo data, you can install the marketing_campaign_crm_demo module, but this will also install the CRM application as it depends on CRM Leads.
    """,
    'website': 'https://www.odoo.com/page/lead-automation',
    # XML/CSV resources loaded at install time: views, data, workflows,
    # reports and access rules.
    'data': [
        'marketing_campaign_view.xml',
        'marketing_campaign_data.xml',
        'marketing_campaign_workflow.xml',
        'report/campaign_analysis_view.xml',
        'security/marketing_campaign_security.xml',
        'security/ir.model.access.csv'
    ],
    'demo': ['marketing_campaign_demo.xml'],
    'test': ['test/marketing_campaign.yml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
gpl-2.0