code
stringlengths
1
1.72M
language
stringclasses
1 value
import os import threading import Queue import traceback import atexit import weakref import __future__ # note that the whole code of this module (as well as some # other modules) execute not only on the local side but # also on any gateway's remote side. On such remote sides # we cannot assume the py library to be there and # InstallableGateway._remote_bootstrap_gateway() (located # in register.py) will take care to send source fragments # to the other side. Yes, it is fragile but we have a # few tests that try to catch when we mess up. # XXX the following lines should not be here if 'ThreadOut' not in globals(): import py from py.code import Source from py.__.execnet.channel import ChannelFactory, Channel from py.__.execnet.message import Message ThreadOut = py._thread.ThreadOut WorkerPool = py._thread.WorkerPool NamedThreadPool = py._thread.NamedThreadPool import os debug = 0 # open('/tmp/execnet-debug-%d' % os.getpid() , 'wa') sysex = (KeyboardInterrupt, SystemExit) class Gateway(object): _ThreadOut = ThreadOut remoteaddress = "" def __init__(self, io, execthreads=None, _startcount=2): """ initialize core gateway, using the given inputoutput object and 'execthreads' execution threads. """ global registered_cleanup self._execpool = WorkerPool(maxthreads=execthreads) self._io = io self._outgoing = Queue.Queue() self._channelfactory = ChannelFactory(self, _startcount) if not registered_cleanup: atexit.register(cleanup_atexit) registered_cleanup = True _active_sendqueues[self._outgoing] = True self._pool = NamedThreadPool(receiver = self._thread_receiver, sender = self._thread_sender) def __repr__(self): """ return string representing gateway type and status. 
""" addr = self.remoteaddress if addr: addr = '[%s]' % (addr,) else: addr = '' try: r = (len(self._pool.getstarted('receiver')) and "receiving" or "not receiving") s = (len(self._pool.getstarted('sender')) and "sending" or "not sending") i = len(self._channelfactory.channels()) except AttributeError: r = s = "uninitialized" i = "no" return "<%s%s %s/%s (%s active channels)>" %( self.__class__.__name__, addr, r, s, i) ## def _local_trystopexec(self): ## self._execpool.shutdown() def _trace(self, *args): if debug: try: l = "\n".join(args).split(os.linesep) id = getid(self) for x in l: print >>debug, x debug.flush() except sysex: raise except: traceback.print_exc() def _traceex(self, excinfo): try: l = traceback.format_exception(*excinfo) errortext = "".join(l) except: errortext = '%s: %s' % (excinfo[0].__name__, excinfo[1]) self._trace(errortext) def _thread_receiver(self): """ thread to read and handle Messages half-sync-half-async. """ try: from sys import exc_info while 1: try: msg = Message.readfrom(self._io) self._trace("received <- %r" % msg) msg.received(self) except sysex: break except EOFError: break except: self._traceex(exc_info()) break finally: self._send(None) self._channelfactory._finished_receiving() self._trace('leaving %r' % threading.currentThread()) def _send(self, msg): self._outgoing.put(msg) def _thread_sender(self): """ thread to send Messages over the wire. 
""" try: from sys import exc_info while 1: msg = self._outgoing.get() try: if msg is None: self._io.close_write() break msg.writeto(self._io) except: excinfo = exc_info() self._traceex(excinfo) if msg is not None: msg.post_sent(self, excinfo) break else: self._trace('sent -> %r' % msg) msg.post_sent(self) finally: self._trace('leaving %r' % threading.currentThread()) def _local_redirect_thread_output(self, outid, errid): l = [] for name, id in ('stdout', outid), ('stderr', errid): if id: channel = self._channelfactory.new(outid) out = self._ThreadOut(sys, name) out.setwritefunc(channel.send) l.append((out, channel)) def close(): for out, channel in l: out.delwritefunc() channel.close() return close def _thread_executor(self, channel, (source, outid, errid)): """ worker thread to execute source objects from the execution queue. """ from sys import exc_info try: loc = { 'channel' : channel } self._trace("execution starts:", repr(source)[:50]) close = self._local_redirect_thread_output(outid, errid) try: co = compile(source+'\n', '', 'exec', __future__.CO_GENERATOR_ALLOWED) exec co in loc finally: close() self._trace("execution finished:", repr(source)[:50]) except (KeyboardInterrupt, SystemExit): pass except: excinfo = exc_info() l = traceback.format_exception(*excinfo) errortext = "".join(l) channel.close(errortext) self._trace(errortext) else: channel.close() def _local_schedulexec(self, channel, sourcetask): self._trace("dispatching exec") self._execpool.dispatch(self._thread_executor, channel, sourcetask) def _newredirectchannelid(self, callback): if callback is None: return if hasattr(callback, 'write'): callback = callback.write assert callable(callback) chan = self.newchannel() chan.setcallback(callback) return chan.id # _____________________________________________________________________ # # High Level Interface # _____________________________________________________________________ # def newchannel(self): """ return new channel object. 
""" return self._channelfactory.new() def remote_exec(self, source, stdout=None, stderr=None): """ return channel object and connect it to a remote execution thread where the given 'source' executes and has the sister 'channel' object in its global namespace. The callback functions 'stdout' and 'stderr' get called on receival of remote stdout/stderr output strings. """ try: source = str(Source(source)) except NameError: try: import py source = str(py.code.Source(source)) except ImportError: pass channel = self.newchannel() outid = self._newredirectchannelid(stdout) errid = self._newredirectchannelid(stderr) self._send(Message.CHANNEL_OPEN( channel.id, (source, outid, errid))) return channel def _remote_redirect(self, stdout=None, stderr=None): """ return a handle representing a redirection of a remote end's stdout to a local file object. with handle.close() the redirection will be reverted. """ clist = [] for name, out in ('stdout', stdout), ('stderr', stderr): if out: outchannel = self.newchannel() outchannel.setcallback(getattr(out, 'write', out)) channel = self.remote_exec(""" import sys outchannel = channel.receive() outchannel.gateway._ThreadOut(sys, %r).setdefaultwriter(outchannel.send) """ % name) channel.send(outchannel) clist.append(channel) for c in clist: c.waitclose() class Handle: def close(_): for name, out in ('stdout', stdout), ('stderr', stderr): if out: c = self.remote_exec(""" import sys channel.gateway._ThreadOut(sys, %r).resetdefault() """ % name) c.waitclose() return Handle() def exit(self): """ Try to stop all IO activity. """ try: del _active_sendqueues[self._outgoing] except KeyError: pass else: self._send(None) def join(self, joinexec=True): """ Wait for all IO (and by default all execution activity) to stop. 
""" current = threading.currentThread() for x in self._pool.getstarted(): if x != current: self._trace("joining %s" % x) x.join() self._trace("joining sender/reciver threads finished, current %r" % current) if joinexec: self._execpool.join() self._trace("joining execution threads finished, current %r" % current) def getid(gw, cache={}): name = gw.__class__.__name__ try: return cache.setdefault(name, {})[id(gw)] except KeyError: cache[name][id(gw)] = x = "%s:%s.%d" %(os.getpid(), gw.__class__.__name__, len(cache[name])) return x registered_cleanup = False _active_sendqueues = weakref.WeakKeyDictionary() def cleanup_atexit(): if debug: print >>debug, "="*20 + "cleaning up" + "=" * 20 debug.flush() while True: try: queue, ignored = _active_sendqueues.popitem() except KeyError: break queue.put(None)
Python
""" InputOutput Classes used for connecting gateways across process or computer barriers. """ import socket, os, sys class SocketIO: server_stmt = """ io = SocketIO(clientsock) import sys #try: # sys.stdout = sys.stderr = open('/tmp/execnet-socket-debug.log', 'a', 0) #except (IOError, OSError): # sys.stdout = sys.stderr = open('/dev/null', 'w') #print '='*60 """ error = (socket.error, EOFError) def __init__(self, sock): self.sock = sock try: sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) sock.setsockopt(socket.SOL_IP, socket.IP_TOS, 0x10) # IPTOS_LOWDELAY except socket.error, e: print "WARNING: Cannot set socket option:", str(e) self.readable = self.writeable = True def read(self, numbytes): "Read exactly 'bytes' bytes from the socket." buf = "" while len(buf) < numbytes: t = self.sock.recv(numbytes - len(buf)) #print 'recv -->', len(t) if not t: raise EOFError buf += t return buf def write(self, data): """write out all bytes to the socket. """ self.sock.sendall(data) def close_read(self): if self.readable: self.sock.shutdown(0) self.readable = None def close_write(self): if self.writeable: self.sock.shutdown(1) self.writeable = None class Popen2IO: server_stmt = """ import sys, StringIO io = Popen2IO(sys.stdout, sys.stdin) sys.stdout = sys.stderr = StringIO.StringIO() #try: # sys.stdout = sys.stderr = open('/tmp/execnet-popen-debug.log', 'a', 0) #except (IOError, OSError): # sys.stdout = sys.stderr = open('/dev/null', 'w') #print '='*60 """ error = (IOError, OSError, EOFError) def __init__(self, infile, outfile): if sys.platform == 'win32': import msvcrt msvcrt.setmode(infile.fileno(), os.O_BINARY) msvcrt.setmode(outfile.fileno(), os.O_BINARY) self.outfile, self.infile = infile, outfile self.readable = self.writeable = True def read(self, numbytes): """Read exactly 'bytes' bytes from the pipe. """ #import sys #print >> sys.stderr, "reading..." 
s = self.infile.read(numbytes) #print >> sys.stderr, "read: %r" % s if len(s) < numbytes: raise EOFError return s def write(self, data): """write out all bytes to the pipe. """ #import sys #print >> sys.stderr, "writing: %r" % data self.outfile.write(data) self.outfile.flush() def close_read(self): if self.readable: self.infile.close() self.readable = None def close_write(self): if self.writeable: self.outfile.close() self.writeable = None
Python
""" ad-hoc networking mechanism """
Python
# Module doctest. # Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). # Major enhancements and refactoring by: # Jim Fulton # Edward Loper # Provided as-is; use at your own risk; no warranty; no promises; enjoy! r"""Module doctest -- a framework for running examples in docstrings. In simplest use, end each module M to be tested with: def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test() Then running the module as a script will cause the examples in the docstrings to get executed and verified: python M.py This won't display anything unless an example fails, in which case the failing example(s) and the cause(s) of the failure(s) are printed to stdout (why not stderr? because stderr is a lame hack <0.2 wink>), and the final line of output is "Test failed.". Run it with the -v switch instead: python M.py -v and a detailed report of all examples tried is printed to stdout, along with assorted summaries at the end. You can force verbose mode by passing "verbose=True" to testmod, or prohibit it by passing "verbose=False". In either of those cases, sys.argv is not examined by testmod. There are a variety of other ways to run doctests, including integration with the unittest framework, and support for running non-Python text files containing doctests. There are also many ways to override parts of doctest's default behaviors. See the Library Reference Manual for details. """ __docformat__ = 'reStructuredText en' __all__ = [ # 0, Option Flags 'register_optionflag', 'DONT_ACCEPT_TRUE_FOR_1', 'DONT_ACCEPT_BLANKLINE', 'NORMALIZE_WHITESPACE', 'ELLIPSIS', 'IGNORE_EXCEPTION_DETAIL', 'COMPARISON_FLAGS', 'REPORT_UDIFF', 'REPORT_CDIFF', 'REPORT_NDIFF', 'REPORT_ONLY_FIRST_FAILURE', 'REPORTING_FLAGS', # 1. Utility Functions 'is_private', # 2. Example & DocTest 'Example', 'DocTest', # 3. Doctest Parser 'DocTestParser', # 4. Doctest Finder 'DocTestFinder', # 5. 
Doctest Runner 'DocTestRunner', 'OutputChecker', 'DocTestFailure', 'UnexpectedException', 'DebugRunner', # 6. Test Functions 'testmod', 'testfile', 'run_docstring_examples', # 7. Tester 'Tester', # 8. Unittest Support 'DocTestSuite', 'DocFileSuite', 'set_unittest_reportflags', # 9. Debugging Support 'script_from_examples', 'testsource', 'debug_src', 'debug', ] import __future__ import sys, traceback, inspect, linecache, os, re, types import unittest, difflib, pdb, tempfile import warnings from StringIO import StringIO # Don't whine about the deprecated is_private function in this # module's tests. warnings.filterwarnings("ignore", "is_private", DeprecationWarning, __name__, 0) # There are 4 basic classes: # - Example: a <source, want> pair, plus an intra-docstring line number. # - DocTest: a collection of examples, parsed from a docstring, plus # info about where the docstring came from (name, filename, lineno). # - DocTestFinder: extracts DocTests from a given object's docstring and # its contained objects' docstrings. # - DocTestRunner: runs DocTest cases, and accumulates statistics. # # So the basic picture is: # # list of: # +------+ +---------+ +-------+ # |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| # +------+ +---------+ +-------+ # | Example | # | ... | # | Example | # +---------+ # Option constants. OPTIONFLAGS_BY_NAME = {} def register_optionflag(name): # Create a new flag unless `name` is already known. 
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') ELLIPSIS = register_optionflag('ELLIPSIS') IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | DONT_ACCEPT_BLANKLINE | NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL) REPORT_UDIFF = register_optionflag('REPORT_UDIFF') REPORT_CDIFF = register_optionflag('REPORT_CDIFF') REPORT_NDIFF = register_optionflag('REPORT_NDIFF') REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') REPORTING_FLAGS = (REPORT_UDIFF | REPORT_CDIFF | REPORT_NDIFF | REPORT_ONLY_FIRST_FAILURE) # Special string markers for use in `want` strings: BLANKLINE_MARKER = '<BLANKLINE>' ELLIPSIS_MARKER = '...' ###################################################################### ## Table of Contents ###################################################################### # 1. Utility Functions # 2. Example & DocTest -- store test cases # 3. DocTest Parser -- extracts examples from strings # 4. DocTest Finder -- extracts test cases from objects # 5. DocTest Runner -- runs test cases # 6. Test Functions -- convenient wrappers for testing # 7. Tester Class -- for backwards compatibility # 8. Unittest Support # 9. Debugging Support # 10. Example Usage ###################################################################### ## 1. Utility Functions ###################################################################### def is_private(prefix, base): """prefix, base -> true iff name prefix + "." + base is "private". Prefix may be an empty string, and base does not contain a period. Prefix is ignored (although functions you write conforming to this protocol may make use of it). 
Return true iff base begins with an (at least one) underscore, but does not both begin and end with (at least) two underscores. >>> is_private("a.b", "my_func") False >>> is_private("____", "_my_func") True >>> is_private("someclass", "__init__") False >>> is_private("sometypo", "__init_") True >>> is_private("x.y.z", "_") True >>> is_private("_x.y.z", "__") False >>> is_private("", "") # senseless but consistent False """ warnings.warn("is_private is deprecated; it wasn't useful; " "examine DocTestFinder.find() lists instead", DeprecationWarning, stacklevel=2) return base[:1] == "_" and not base[:2] == "__" == base[-2:] def _extract_future_flags(globs): """ Return the compiler-flags associated with the future features that have been imported into the given namespace (globs). """ flags = 0 for fname in __future__.all_feature_names: feature = globs.get(fname, None) if feature is getattr(__future__, fname): flags |= feature.compiler_flag return flags def _normalize_module(module, depth=2): """ Return the module specified by `module`. In particular: - If `module` is a module, then return module. - If `module` is a string, then import and return the module with that name. - If `module` is None, then return the calling module. The calling module is assumed to be the module of the stack frame at the given depth in the call stack. """ if inspect.ismodule(module): return module elif isinstance(module, (str, unicode)): return __import__(module, globals(), locals(), ["*"]) elif module is None: return sys.modules[sys._getframe(depth).f_globals['__name__']] else: raise TypeError("Expected a module, string, or None") def _indent(s, indent=4): """ Add the given number of space characters to the beginning every non-blank line in `s`, and return the result. 
""" # This regexp matches the start of non-blank lines: return re.sub('(?m)^(?!$)', indent*' ', s) def _exception_traceback(exc_info): """ Return a string containing a traceback message for the given exc_info tuple (as returned by sys.exc_info()). """ # Get a traceback message. excout = StringIO() exc_type, exc_val, exc_tb = exc_info traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) return excout.getvalue() # Override some StringIO methods. class _SpoofOut(StringIO): def getvalue(self): result = StringIO.getvalue(self) # If anything at all was written, make sure there's a trailing # newline. There's no way for the expected output to indicate # that a trailing newline is missing. if result and not result.endswith("\n"): result += "\n" # Prevent softspace from screwing up the next test case, in # case they used print with a trailing comma in an example. if hasattr(self, "softspace"): del self.softspace return result def truncate(self, size=None): StringIO.truncate(self, size) if hasattr(self, "softspace"): del self.softspace # Worst-case linear-time ellipsis matching. def _ellipsis_match(want, got): """ Essentially the only subtle case: >>> _ellipsis_match('aa...aa', 'aaa') False """ if ELLIPSIS_MARKER not in want: return want == got # Find "the real" strings. ws = want.split(ELLIPSIS_MARKER) assert len(ws) >= 2 # Deal with exact matches possibly needed at one or both ends. startpos, endpos = 0, len(got) w = ws[0] if w: # starts with exact match if got.startswith(w): startpos = len(w) del ws[0] else: return False w = ws[-1] if w: # ends with exact match if got.endswith(w): endpos -= len(w) del ws[-1] else: return False if startpos > endpos: # Exact end matches required more characters than we have, as in # _ellipsis_match('aa...aa', 'aaa') return False # For the rest, we only need to find the leftmost non-overlapping # match for each piece. If there's no overall match that way alone, # there's no overall match period. 
for w in ws: # w may be '' at times, if there are consecutive ellipses, or # due to an ellipsis at the start or end of `want`. That's OK. # Search for an empty string succeeds, and doesn't change startpos. startpos = got.find(w, startpos, endpos) if startpos < 0: return False startpos += len(w) return True def _comment_line(line): "Return a commented form of the given line" line = line.rstrip() if line: return '# '+line else: return '#' class _OutputRedirectingPdb(pdb.Pdb): """ A specialized version of the python debugger that redirects stdout to a given stream when interacting with the user. Stdout is *not* redirected when traced code is executed. """ def __init__(self, out): self.__out = out pdb.Pdb.__init__(self) def trace_dispatch(self, *args): # Redirect stdout to the given stream. save_stdout = sys.stdout sys.stdout = self.__out # Call Pdb's trace dispatch method. try: return pdb.Pdb.trace_dispatch(self, *args) finally: sys.stdout = save_stdout # [XX] Normalize with respect to os.path.pardir? def _module_relative_path(module, path): if not inspect.ismodule(module): raise TypeError, 'Expected a module: %r' % module if path.startswith('/'): raise ValueError, 'Module-relative files may not have absolute paths' # Find the base directory for the path. if hasattr(module, '__file__'): # A normal module/package basedir = os.path.split(module.__file__)[0] elif module.__name__ == '__main__': # An interactive session. if len(sys.argv)>0 and sys.argv[0] != '': basedir = os.path.split(sys.argv[0])[0] else: basedir = os.curdir else: # A module w/o __file__ (this includes builtins) raise ValueError("Can't resolve paths relative to the module " + module + " (it has no __file__)") # Combine the base directory and the path. return os.path.join(basedir, *(path.split('/'))) ###################################################################### ## 2. 
## Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
##   fragment of source code, and "want" is the expected output for
##   "source."  The Example class also includes information about
##   where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
##   a string (such as an object's docstring).  The DocTest class also
##   includes information about where the string was extracted from.

class Example:
    """
    A single doctest example, consisting of source code and expected
    output.  `Example` defines the following attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.

      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.

      - exc_msg: The exception message generated by the example, if
        the example is expected to generate an exception; or `None` if
        it is not expected to generate an exception.  This exception
        message is compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`.  The constructor adds a newline
        if needed.

      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  This line number is
        zero-based, with respect to the beginning of the DocTest.

      - indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that preceed the
        example's first prompt.

      - options: A dictionary mapping from option flags to True or
        False, which is used to override default options for this
        example.  Any option flags not contained in this dictionary
        are left at their default value (as specified by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize inputs: source/want/exc_msg always end in a newline.
        if not source.endswith('\n'):
            source += '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        # Store properties.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        if options is None:
            options = {}
        self.options = options
        self.exc_msg = exc_msg

class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.

      - globs: The namespace (aka globals) that the examples should
        be run in.

      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).

      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.

      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.

      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno,
                 docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # This lets us sort tests by name:
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))

######################################################################
## 3. DocTestParser
######################################################################

class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
                     (?![ ]*>>>)  # Not a line starting with PS1
                     .*$\n?       # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)

    # A regular expression for handling `want` strings that contain
    # expected exceptions.
It divides `want` into three pieces: # - the traceback header line (`hdr`) # - the traceback stack (`stack`) # - the exception message (`msg`), as generated by # traceback.format_exception_only() # `msg` may have multiple lines. We assume/require that the # exception message is the first non-indented line starting with a word # character following the traceback header line. _EXCEPTION_RE = re.compile(r""" # Grab the traceback header. Different versions of Python have # said different things on the first traceback line. ^(?P<hdr> Traceback\ \( (?: most\ recent\ call\ last | innermost\ last ) \) : ) \s* $ # toss trailing whitespace on the header. (?P<stack> .*?) # don't blink: absorb stuff until... ^ (?P<msg> \w+ .*) # a line *starts* with alphanum. """, re.VERBOSE | re.MULTILINE | re.DOTALL) # A callable returning a true value iff its argument is a blank line # or contains a single comment. _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match def parse(self, string, name='<string>'): """ Divide the given string into examples and intervening text, and return them as a list of alternating Examples and strings. Line numbers for the Examples are 0-based. The optional argument `name` is a name identifying this string, and is only used for error messages. """ string = string.expandtabs() # If all lines begin with the same indentation, then strip it. min_indent = self._min_indent(string) if min_indent > 0: string = '\n'.join([l[min_indent:] for l in string.split('\n')]) output = [] charno, lineno = 0, 0 # Find all doctest examples in the string: for m in self._EXAMPLE_RE.finditer(string): # Add the pre-example text to `output`. output.append(string[charno:m.start()]) # Update lineno (lines before this example) lineno += string.count('\n', charno, m.start()) # Extract info from the regexp match. (source, options, want, exc_msg) = \ self._parse_example(m, name, lineno) # Create an Example, and add it to the list. 
if not self._IS_BLANK_OR_COMMENT(source): output.append( Example(source, want, exc_msg, lineno=lineno, indent=min_indent+len(m.group('indent')), options=options) ) # Update lineno (lines inside this example) lineno += string.count('\n', m.start(), m.end()) # Update charno. charno = m.end() # Add any remaining post-example text to `output`. output.append(string[charno:]) return output def get_doctest(self, string, globs, name, filename, lineno): """ Extract all doctest examples from the given string, and collect them into a `DocTest` object. `globs`, `name`, `filename`, and `lineno` are attributes for the new `DocTest` object. See the documentation for `DocTest` for more information. """ return DocTest(self.get_examples(string, name), globs, name, filename, lineno, string) def get_examples(self, string, name='<string>'): """ Extract all doctest examples from the given string, and return them as a list of `Example` objects. Line numbers are 0-based, because it's most common in doctests that nothing interesting appears on the same line as opening triple-quote, and so the first interesting line is called \"line 1\" then. The optional argument `name` is a name identifying this string, and is only used for error messages. """ return [x for x in self.parse(string, name) if isinstance(x, Example)] def _parse_example(self, m, name, lineno): """ Given a regular expression match from `_EXAMPLE_RE` (`m`), return a pair `(source, want)`, where `source` is the matched example's source code (with prompts and indentation stripped); and `want` is the example's expected output (with indentation stripped). `name` is the string's name, and `lineno` is the line number where the example starts; both are used for error messages. """ # Get the example's indentation level. indent = len(m.group('indent')) # Divide source into lines; check that they're properly # indented; and then strip their indentation & prompts. 
source_lines = m.group('source').split('\n') self._check_prompt_blank(source_lines, indent, name, lineno) self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) source = '\n'.join([sl[indent+4:] for sl in source_lines]) # Divide want into lines; check that it's properly indented; and # then strip the indentation. Spaces before the last newline should # be preserved, so plain rstrip() isn't good enough. want = m.group('want') want_lines = want.split('\n') if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): del want_lines[-1] # forget final newline & spaces after it self._check_prefix(want_lines, ' '*indent, name, lineno + len(source_lines)) want = '\n'.join([wl[indent:] for wl in want_lines]) # If `want` contains a traceback message, then extract it. m = self._EXCEPTION_RE.match(want) if m: exc_msg = m.group('msg') else: exc_msg = None # Extract options from the source. options = self._find_options(source, name, lineno) return source, options, want, exc_msg # This regular expression looks for option directives in the # source code of an example. Option directives are comments # starting with "doctest:". Warning: this may give false # positives for string-literals that contain the string # "#doctest:". Eliminating these false positives would require # actually parsing the string; but we limit them by ignoring any # line containing "#doctest:" that is *followed* by a quote mark. _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', re.MULTILINE) def _find_options(self, source, name, lineno): """ Return a dictionary containing option overrides extracted from option directives in the given source string. `name` is the string's name, and `lineno` is the line number where the example starts; both are used for error messages. 
""" options = {} # (note: with the current regexp, this will match at most once:) for m in self._OPTION_DIRECTIVE_RE.finditer(source): option_strings = m.group(1).replace(',', ' ').split() for option in option_strings: if (option[0] not in '+-' or option[1:] not in OPTIONFLAGS_BY_NAME): raise ValueError('line %r of the doctest for %s ' 'has an invalid option: %r' % (lineno+1, name, option)) flag = OPTIONFLAGS_BY_NAME[option[1:]] options[flag] = (option[0] == '+') if options and self._IS_BLANK_OR_COMMENT(source): raise ValueError('line %r of the doctest for %s has an option ' 'directive on a line with no example: %r' % (lineno, name, source)) return options # This regular expression finds the indentation of every non-blank # line in a string. _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE) def _min_indent(self, s): "Return the minimum indentation of any non-blank line in `s`" indents = [len(indent) for indent in self._INDENT_RE.findall(s)] if len(indents) > 0: return min(indents) else: return 0 def _check_prompt_blank(self, lines, indent, name, lineno): """ Given the lines of a source string (including prompts and leading indentation), check to make sure that every prompt is followed by a space character. If any line is not followed by a space character, then raise ValueError. """ for i, line in enumerate(lines): if len(line) >= indent+4 and line[indent+3] != ' ': raise ValueError('line %r of the docstring for %s ' 'lacks blank after %s: %r' % (lineno+i+1, name, line[indent:indent+3], line)) def _check_prefix(self, lines, prefix, name, lineno): """ Check that every line in the given list starts with the given prefix; if any line does not, then raise a ValueError. """ for i, line in enumerate(lines): if line and not line.startswith(prefix): raise ValueError('line %r of the docstring for %s has ' 'inconsistent leading whitespace: %r' % (lineno+i+1, name, line)) ###################################################################### ## 4. 
DocTest Finder ###################################################################### class DocTestFinder: """ A class used to extract the DocTests that are relevant to a given object, from its docstring and the docstrings of its contained objects. Doctests can currently be extracted from the following object types: modules, functions, classes, methods, staticmethods, classmethods, and properties. """ def __init__(self, verbose=False, parser=DocTestParser(), recurse=True, _namefilter=None, exclude_empty=True): """ Create a new doctest finder. The optional argument `parser` specifies a class or function that should be used to create new DocTest objects (or objects that implement the same interface as DocTest). The signature for this factory function should match the signature of the DocTest constructor. If the optional argument `recurse` is false, then `find` will only examine the given object, and not any contained objects. If the optional argument `exclude_empty` is false, then `find` will include tests for objects with empty docstrings. """ self._parser = parser self._verbose = verbose self._recurse = recurse self._exclude_empty = exclude_empty # _namefilter is undocumented, and exists only for temporary backward- # compatibility support of testmod's deprecated isprivate mess. self._namefilter = _namefilter def find(self, obj, name=None, module=None, globs=None, extraglobs=None): """ Return a list of the DocTests that are defined by the given object's docstring, or by any of its contained objects' docstrings. The optional parameter `module` is the module that contains the given object. If the module is not specified or is None, then the test finder will attempt to automatically determine the correct module. The object's module is used: - As a default namespace, if `globs` is not specified. - To prevent the DocTestFinder from extracting DocTests from objects that are imported from other modules. - To find the name of the file containing the object. 
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are
        ignored.

        If `module` is False, no attempt to find the module will be
        made.  This is obscure, of use mostly in tests: if `module` is
        False, or is None but cannot be found automatically, then all
        objects are considered to belong to the (non-existent) module,
        so all contained objects will (recursively) be searched for
        doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests

    def _filter(self, obj, prefix, base):
        """
        Return true if the given object should not be examined.
        """
        return (self._namefilter is not None and
                self._namefilter(prefix, base))

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True  # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print 'Finding tests in %s' % name

        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and self._from_module(module, val)): self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a module's __test__ dictionary. if inspect.ismodule(obj) and self._recurse: for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, basestring): raise ValueError("DocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, basestring)): raise ValueError("DocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj) and self._recurse: for valname, val in obj.__dict__.items(): # Check if this contained object should be ignored. if self._filter(val, name, valname): continue # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).im_func # Recurse to methods, properties, and nested classes. if ((inspect.isfunction(val) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) def _get_test(self, obj, name, module, globs, source_lines): """ Return a DocTest for the given object, if it defines a docstring; otherwise, return None. """ # Extract the object's docstring. If it doesn't have one, # then return None (no test for this object). 
if isinstance(obj, basestring): docstring = obj else: try: if obj.__doc__ is None: docstring = '' else: docstring = obj.__doc__ if not isinstance(docstring, basestring): docstring = str(docstring) except (TypeError, AttributeError): docstring = '' # Find the docstring's location in the file. lineno = self._find_lineno(obj, source_lines) # Don't bother if the docstring is empty. if self._exclude_empty and not docstring: return None # Return a DocTest for this object. if module is None: filename = None else: filename = getattr(module, '__file__', module.__name__) if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] return self._parser.get_doctest(docstring, globs, name, filename, lineno) def _find_lineno(self, obj, source_lines): """ Return a line number of the given object's docstring. Note: this method assumes that the object has a docstring. """ lineno = None # Find the line number for modules. if inspect.ismodule(obj): lineno = 0 # Find the line number for classes. # Note: this could be fooled if a class is defined multiple # times in a single file. if inspect.isclass(obj): if source_lines is None: return None pat = re.compile(r'^\s*class\s*%s\b' % getattr(obj, '__name__', '-')) for i, line in enumerate(source_lines): if pat.match(line): lineno = i break # Find the line number for functions & methods. if inspect.ismethod(obj): obj = obj.im_func if inspect.isfunction(obj): obj = obj.func_code if inspect.istraceback(obj): obj = obj.tb_frame if inspect.isframe(obj): obj = obj.f_code if inspect.iscode(obj): lineno = getattr(obj, 'co_firstlineno', None)-1 # Find the line number where the docstring starts. Assume # that it's the first line that begins with a quote mark. # Note: this could be fooled by a multiline function # signature, where a continuation line begins with a quote # mark. 
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None

######################################################################
## 5. DocTest Runner
######################################################################

class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)
        >>> tests.sort(key = lambda test: test.name)
        >>> for test in tests:
        ...     print test.name, '->', runner.run(test)
        _TestClass -> (0, 2)
        _TestClass.__init__ -> (0, 2)
        _TestClass.get -> (0, 2)
        _TestClass.square -> (0, 1)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

        >>> runner.summarize(verbose=1)
        4 items passed all tests:
           2 tests in _TestClass
           2 tests in _TestClass.__init__
           2 tests in _TestClass.get
           1 tests in _TestClass.square
        7 tests in 4 items.
        7 passed and 0 failed.
        Test passed.
        (0, 7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

        >>> runner.tries
        7
        >>> runner.failures
        0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.
        (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        # Build the standard failure banner: divider, file/line
        # location, and the failing example's source.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then supress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue()  # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue()  # ==== Example Finished ====

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception:  check if it was expected.
            else:
                exc_info = sys.exc_info()
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
""" f2, t2 = self._name2ft.get(test.name, (0,0)) self._name2ft[test.name] = (f+f2, t+t2) self.failures += f self.tries += t __LINECACHE_FILENAME_RE = re.compile(r'<doctest ' r'(?P<name>[\w\.]+)' r'\[(?P<examplenum>\d+)\]>$') def __patched_linecache_getlines(self, filename, additional_arg=None): m = self.__LINECACHE_FILENAME_RE.match(filename) if m and m.group('name') == self.test.name: example = self.test.examples[int(m.group('examplenum'))] return example.source.splitlines(True) else: return self.save_linecache_getlines(filename) def run(self, test, compileflags=None, out=None, clear_globs=True): """ Run the examples in `test`, and display the results using the writer function `out`. The examples are run in the namespace `test.globs`. If `clear_globs` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use `clear_globs=False`. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. The output of each example is checked using `DocTestRunner.check_output`, and the results are formatted by the `DocTestRunner.report_*` methods. """ self.test = test if compileflags is None: compileflags = _extract_future_flags(test.globs) save_stdout = sys.stdout if out is None: out = save_stdout.write sys.stdout = self._fakeout # Patch pdb.set_trace to restore sys.stdout during interactive # debugging (so it's not still redirected to self._fakeout). # Note that the interactive output will go to *our* # save_stdout, even if that's not the real sys.stdout; this # allows us to write test cases for the set_trace behavior. 
save_set_trace = pdb.set_trace self.debugger = _OutputRedirectingPdb(save_stdout) self.debugger.reset() pdb.set_trace = self.debugger.set_trace # Patch linecache.getlines, so we can see the example's source # when we're inside the debugger. self.save_linecache_getlines = linecache.getlines linecache.getlines = self.__patched_linecache_getlines try: return self.__run(test, compileflags, out) finally: sys.stdout = save_stdout pdb.set_trace = save_set_trace linecache.getlines = self.save_linecache_getlines if clear_globs: test.globs.clear() #///////////////////////////////////////////////////////////////// # Summarization #///////////////////////////////////////////////////////////////// def summarize(self, verbose=None): """ Print a summary of all the test cases that have been run by this DocTestRunner, and return a tuple `(f, t)`, where `f` is the total number of failed examples, and `t` is the total number of tried examples. The optional `verbose` argument controls how detailed the summary is. If the verbosity is not specified, then the DocTestRunner's verbosity is used. """ if verbose is None: verbose = self._verbose notests = [] passed = [] failed = [] totalt = totalf = 0 for x in self._name2ft.items(): name, (f, t) = x assert f <= t totalt += t totalf += f if t == 0: notests.append(name) elif f == 0: passed.append( (name, t) ) else: failed.append(x) if verbose: if notests: print len(notests), "items had no tests:" notests.sort() for thing in notests: print " ", thing if passed: print len(passed), "items passed all tests:" passed.sort() for thing, count in passed: print " %3d tests in %s" % (count, thing) if failed: print self.DIVIDER print len(failed), "items had failures:" failed.sort() for thing, (f, t) in failed: print " %3d of %3d in %s" % (f, t, thing) if verbose: print totalt, "tests in", len(self._name2ft), "items." print totalt - totalf, "passed and", totalf, "failed." if totalf: print "***Test Failed***", totalf, "failures." 
        elif verbose:
            print "Test passed."
        return totalf, totalt

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold another runner's per-test (failures, tries) counts into
        # this runner's table, summing counts for names present in both.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t

class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'

class DocTestFailure(Exception):
    """A DocTest example has failed in debugging mode.

    The exception instance has variables:

    - test: the DocTest object being run

    - example: the Example object that failed

    - got: the actual output
    """
    def __init__(self, test, example, got):
        self.test = test
        self.example = example
        self.got = got

    def __str__(self):
        return str(self.test)

class UnexpectedException(Exception):
    """A DocTest example has encountered an unexpected exception

    The exception instance has variables:

    - test: the DocTest object being run

    - example: the Example object that failed

    - exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        self.test = test
        self.example = example
        self.exc_info = exc_info

    def __str__(self):
        return str(self.test)

class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.

       If an unexpected exception occurs, an UnexpectedException is raised.
       It contains the test, the example, and the original exception:

         >>> runner = DebugRunner(verbose=False)
         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
         ...                                    {}, 'foo', 'foo.py', 0)
         >>> try:
         ...     runner.run(test)
         ... except UnexpectedException, failure:
         ...     pass

         >>> failure.test is test
         True

         >>> failure.example.want
         '42\n'

         >>> exc_info = failure.exc_info
         >>> raise exc_info[0], exc_info[1], exc_info[2]
         Traceback (most recent call last):
         ...
         KeyError

       We wrap the original exception to give the calling application
       access to the test and example information.

       If the output doesn't match, then a DocTestFailure is raised:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 1
         ...      >>> x
         ...      2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> try:
         ...     runner.run(test)
         ... except DocTestFailure, failure:
         ...     pass

       DocTestFailure objects provide access to the test:

         >>> failure.test is test
         True

       As well as to the example:

         >>> failure.example.want
         '2\n'

       and the actual output:

         >>> failure.got
         '1\n'

       If a failure or error occurs, the globals are left intact:

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 1}

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      >>> raise KeyError
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         Traceback (most recent call last):
         ...
         UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 2}

       But the globals are cleared if there is no error:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         (0, 1)

         >>> test.globs
         {}

       """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run with clear_globs=False so a failure leaves test.globs
        # intact for post-mortem inspection; clear only on success.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r

    def report_unexpected_exception(self, out, test, example, exc_info):
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        raise DocTestFailure(test, example, got)

######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.

# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None

def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, isprivate=None,
       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.  Unless isprivate is specified, private names
    are not skipped.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See doctest.__doc__ for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.

    Deprecated in Python 2.4:
    Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private.  The default function is
    treat all functions as public.  Optionally, "isprivate" can be
    set to doctest.is_private to skip over functions marked as private
    using the underscore naming convention; see its docs for details.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if isprivate is not None:
        warnings.warn("the isprivate argument is deprecated; "
                      "examine DocTestFinder.find() lists instead",
                      DeprecationWarning)

    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')

    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(_namefilter=isprivate,
                           exclude_empty=exclude_empty)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Maintain the backward-compatible global doctest.master.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries

def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False,
             parser=DocTestParser()):
    """
    Test examples in the given file.  Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames should be interpreted: - If "module_relative" is True (the default), then "filename" specifies a module-relative path. By default, this path is relative to the calling module's directory; but if the "package" argument is specified, then it is relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and should not be an absolute path (i.e., it may not begin with "/"). - If "module_relative" is False, then "filename" specifies an os-specific path. The path may be absolute or relative (to the current working directory). Optional keyword arg "name" gives the name of the test; by default use the file's basename. Optional keyword argument "package" is a Python package or the name of a Python package whose directory should be used as the base directory for a module relative filename. If no package is specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. Optional keyword arg "globs" gives a dict to be used as the globals when executing examples; by default, use {}. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. Optional keyword arg "extraglobs" gives a dictionary that should be merged into the globals that are used to execute examples. By default, no extra globals are used. Optional keyword arg "verbose" prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg "report" prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg "optionflags" or's together module constants, and defaults to 0. 
Possible values (see the docs for details): DONT_ACCEPT_TRUE_FOR_1 DONT_ACCEPT_BLANKLINE NORMALIZE_WHITESPACE ELLIPSIS IGNORE_EXCEPTION_DETAIL REPORT_UDIFF REPORT_CDIFF REPORT_NDIFF REPORT_ONLY_FIRST_FAILURE Optional keyword arg "raise_on_error" raises an exception on the first unexpected exception or failure. This allows failures to be post-mortem debugged. Optional keyword arg "parser" specifies a DocTestParser (or subclass) that should be used to extract tests from the files. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. """ global master if package and not module_relative: raise ValueError("Package may only be specified for module-" "relative paths.") # Relativize the path if module_relative: package = _normalize_module(package) filename = _module_relative_path(package, filename) # If no name was given, then use the file's name. if name is None: name = os.path.basename(filename) # Assemble the globals. if globs is None: globs = {} else: globs = globs.copy() if extraglobs is not None: globs.update(extraglobs) if raise_on_error: runner = DebugRunner(verbose=verbose, optionflags=optionflags) else: runner = DocTestRunner(verbose=verbose, optionflags=optionflags) # Read the file, convert it to a test, and run it. 
s = open(filename).read() test = parser.get_doctest(s, globs, name, filename, 0) runner.run(test) if report: runner.summarize() if master is None: master = runner else: master.merge(runner) return runner.failures, runner.tries def run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0): """ Test examples in the given object's docstring (`f`), using `globs` as globals. Optional argument `name` is used in failure messages. If the optional argument `verbose` is true, then generate output even if there are no failures. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. Optional keyword arg `optionflags` specifies options for the testing and output. See the documentation for `testmod` for more information. """ # Find, parse, and run all tests in the given module. finder = DocTestFinder(verbose=verbose, recurse=False) runner = DocTestRunner(verbose=verbose, optionflags=optionflags) for test in finder.find(f, name, globs=globs): runner.run(test, compileflags=compileflags) ###################################################################### ## 7. Tester ###################################################################### # This is provided only for backwards compatibility. It's not # actually used in any way. 
# Deprecated backward-compatibility shim: the pre-2.4 Tester API,
# reimplemented on top of DocTestFinder/DocTestRunner.  New code should
# use DocTestRunner directly (the DeprecationWarning below says so).
class Tester:
    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):

        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        # At least one of mod/globs is required; globs defaults to the
        # module's namespace when only mod is given.
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs

        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        # One shared finder/runner pair is reused by all run* methods so
        # that failure/try counts accumulate across calls.
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    # Parse string `s` as a doctest named `name` and run it against
    # self.globs.  Returns (#failures, #tries).
    def runstring(self, s, name):
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return (f,t)

    # Find and run all doctests reachable from `object`, summing the
    # (failures, tries) counts over every test found.
    def rundoc(self, object, name=None, module=None):
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)

    def rundict(self, d, name, module=None):
        # Wrap the dict in a synthetic module so the finder can walk it.
        import new
        m = new.module(name)
        m.__dict__.update(d)
        # NOTE(review): module=False appears to disable the finder's
        # "defined in this module" membership check (as opposed to None,
        # which would make it guess) — confirm against DocTestFinder.find.
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        # Like rundict, but exposes `d` via the __test__ protocol.
        import new
        m = new.module(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    # Fold another Tester's accumulated results into this one.
    def merge(self, other):
        self.testrunner.merge(other.testrunner)

######################################################################
## 8. Unittest Support
######################################################################

# Module-level default reporting flags, OR'ed into a DocTestCase's
# optionflags at runTest time when the case itself carries no
# reporting bits.  Changed via set_unittest_reportflags().
_unittest_reportflags = 0

def set_unittest_reportflags(flags):
    """Sets the unittest option flags.

    The old flag is returned so that a runner could restore the old
    value if it wished to:

      >>> import doctest
      >>> old = doctest._unittest_reportflags
      >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True

      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True

    Only reporting flags can be set:

      >>> doctest.set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)

      >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags

    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    old = _unittest_reportflags
    _unittest_reportflags = flags
    return old


# Adapter that runs one DocTest as a unittest.TestCase.
class DocTestCase(unittest.TestCase):

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):

        unittest.TestCase.__init__(self)
        # All state is stashed under _dt_* names to avoid colliding with
        # unittest.TestCase's own attributes.
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown

    def setUp(self):
        test = self._dt_test

        if self._dt_setUp is not None:
            self._dt_setUp(test)

    def tearDown(self):
        test = self._dt_test

        if self._dt_tearDown is not None:
            self._dt_tearDown(test)

        # Always drop the test's globals so objects created by the
        # examples don't outlive the test.
        test.globs.clear()

    def runTest(self):
        test = self._dt_test
        # Save stdout defensively; DocTestRunner.run handles its own
        # redirection, output is captured via out=new.write below.
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags

        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags

        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)

        try:
            runner.DIVIDER = "-"*70
            # clear_globs=False: tearDown() is responsible for clearing.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old

        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    # Render a unittest-style failure message pointing at the doctest's
    # file/line plus the runner's captured report.
    def format_failure(self, err):
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                '  File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions

           The unit test framework includes a debug method on test cases
           and test suites to support post-mortem debugging.  The test code
           is run in such a way that errors are not caught.  This way a
           caller can catch the errors and initiate post-mortem debugging.

           The DocTestCase provides a debug method that raises
           UnexpectedException errors if there is an unexepcted
           exception:

             >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
             ...                {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)
             >>> try:
             ...     case.debug()
             ... except UnexpectedException, failure:
             ...     pass

           The UnexpectedException contains the test, the example, and
           the original exception:

             >>> failure.test is test
             True

             >>> failure.example.want
             '42\n'

             >>> exc_info = failure.exc_info
             >>> raise exc_info[0], exc_info[1], exc_info[2]
             Traceback (most recent call last):
             ...
             KeyError

           If the output doesn't match, then a DocTestFailure is raised:

             >>> test = DocTestParser().get_doctest('''
             ...      >>> x = 1
             ...      >>> x
             ...      2
             ...      ''', {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)

             >>> try:
             ...    case.debug()
             ... except DocTestFailure, failure:
             ...    pass

           DocTestFailure objects provide access to the test:

             >>> failure.test is test
             True

           As well as to the example:

             >>> failure.example.want
             '2\n'

           and the actual output:

             >>> failure.got
             '1\n'

           """
        # DebugRunner raises instead of reporting, so errors propagate
        # to the caller for post-mortem debugging.
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()

    def id(self):
        return self._dt_test.name

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name

def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    This converts each documentation string in a module that
    contains doctest tests to a unittest test case.  If any of the
    tests in a doc string fail, then the test case fails.  An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.

    The `module` argument provides the module to be tested.  The argument
    can be either a module or a module name.

    If no argument is given, the calling module is used.

    A number of options may be provided as keyword arguments:

    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
       A set of doctest option flags expressed as an integer.
    """

    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        raise ValueError(module, "has no tests")

    # Sort for a deterministic suite order.
    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if len(test.examples) == 0:
            continue
        if not test.filename:
            # Fall back to the module's own file; point at the .py
            # source rather than the compiled .pyc/.pyo.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite

# DocTestCase variant for tests read from text files rather than
# docstrings: ids and failure messages are filename-based.
class DocFileCase(DocTestCase):

    def id(self):
        return '_'.join(self._dt_test.name.split('.'))

    def __repr__(self):
        return self._dt_test.filename
    __str__ = __repr__

    def format_failure(self, err):
        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
                % (self._dt_test.name, self._dt_test.filename, err)
                )

# Build a single DocFileCase from one doctest text file; see
# DocFileSuite for the meaning of the keyword options.
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    if globs is None:
        globs = {}

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path.
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)

    # Find the file and read it.
    name = os.path.basename(path)
    doc = open(path).read()

    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)

    return DocFileCase(test, **options)

def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".

    A number of options may be provided as keyword arguments:

    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths.  By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package.  To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").

      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths.  These paths may be absolute
      or relative (to the current working directory).

    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames.  It is an error to specify "package" if
      "module_relative" is False.

    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.

    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.
    """
    suite = unittest.TestSuite()

    # We do this here so that _normalize_module is called at the right
    # level.  If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite

######################################################################
## 9. Debugging Support
######################################################################

def script_from_examples(s):
    r"""Extract script from text with examples.

       Converts text with examples to a Python script.  Example input is
       converted to regular code.  Example output and all other words
       are converted to comments:

       >>> text = '''
       ...       Here are examples of simple math.
       ...
       ...           Python has super accurate integer addition
       ...
       ...           >>> 2 + 2
       ...           5
       ...
       ...           And very friendly error messages:
       ...
       ...           >>> 1/0
       ...           To Infinity
       ...           And
       ...           Beyond
       ...
       ...           You can use logic if you want:
       ...
       ...           >>> if 0:
       ...           ...    blah
       ...           ...    blah
       ...           ...
       ...
       ...           Ho hum
       ...           '''

       >>> print script_from_examples(text)
       # Here are examples of simple math.
       #
       #     Python has super accurate integer addition
       #
       2 + 2
       # Expected:
       ## 5
       #
       #     And very friendly error messages:
       #
       1/0
       # Expected:
       ## To Infinity
       ## And
       ## Beyond
       #
       #     You can use logic if you want:
       #
       if 0:
          blah
          blah
       #
       #     Ho hum
       <BLANKLINE>
       """
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Add the example's source code (strip trailing NL)
            output.append(piece.source[:-1])
            # Add the expected output:
            want = piece.want
            if want:
                output.append('# Expected:')
                output += ['## '+l for l in want.split('\n')[:-1]]
        else:
            # Add non-example text.
            output += [_comment_line(l)
                       for l in piece.split('\n')[:-1]]

    # Trim junk on both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    # Combine the output, and return it.
    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(output) + '\n'

def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the doc string with tests to be debugged.
    """
    module = _normalize_module(module)
    tests = DocTestFinder().find(module)
    test = [t for t in tests if t.name == name]
    if not test:
        raise ValueError(name, "not found in tests")
    test = test[0]
    testsrc = script_from_examples(test.docstring)
    return testsrc

def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, in argument `src`'"""
    testsrc = script_from_examples(src)
    debug_script(testsrc, pm, globs)

def debug_script(src, pm=False, globs=None):
    "Debug a test script.  `src` is the script, as a string."
    import pdb

    # Note that tempfile.NameTemporaryFile() cannot be used.  As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()

    try:
        if globs:
            # Copy so the debugged script can't mutate the caller's dict.
            globs = globs.copy()
        else:
            globs = {}

        if pm:
            # Post-mortem mode: run to the exception, then drop into pdb
            # on the traceback.
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here.  '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)

    finally:
        os.remove(srcfilename)

def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged.
    """
    module = _normalize_module(module)
    testsrc = testsource(module, name)
    debug_script(testsrc, pm, module.__dict__)

######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.

    Methods:
        square()
        get()

    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """

    def __init__(self, val):
        """val -> _TestClass object with associated value val.

        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """
        self.val = val

    def square(self):
        """square() -> square TestClass's associated value

        >>> _TestClass(13).square().get()
        169
        """
        self.val = self.val ** 2
        return self

    def get(self):
        """get() -> return TestClass's associated value.

        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """
        return self.val

__test__ = {"_TestClass": _TestClass,
            "string": r"""
                      Example of a string object, searched as-is.
                      >>> x = 1; y = 2
                      >>> x + y, x * y
                      (3, 2)
                      """,
            "bool-int equivalence": r"""
                                    In 2.2, boolean expressions displayed
                                    0 or 1.  By default, we still accept
                                    them.  This can be disabled by passing
                                    DONT_ACCEPT_TRUE_FOR_1 to the new
                                    optionflags argument.
                                    >>> 4 == 4
                                    1
                                    >>> 4 == 4
                                    True
                                    >>> 4 > 4
                                    0
                                    >>> 4 > 4
                                    False
                                    """,
            "blank lines": r"""
                Blank lines can be marked with <BLANKLINE>:
                    >>> print 'foo\n\nbar\n'
                    foo
                    <BLANKLINE>
                    bar
                    <BLANKLINE>
            """,
            "ellipsis": r"""
                If the ellipsis flag is used, then '...' can be used to
                elide substrings in the desired output:
                    >>> print range(1000) #doctest: +ELLIPSIS
                    [0, 1, 2, ..., 999]
            """,
            "whitespace normalization": r"""
                If the whitespace normalization flag is used, then
                differences in whitespace are ignored.
                    >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
                    [0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
                     13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
                     26, 27, 28, 29]
            """,
           }

# Self-test: run this module's own doctests through the unittest API.
def _test():
    r = unittest.TextTestRunner()
    r.run(DocTestSuite())

if __name__ == "__main__":
    _test()
Python
import py


class Directory(py.test.collect.Directory):
    """Collector for this directory's compatibility tests."""

    def run(self):
        # These tests are not suited to automatic collection; skip the
        # whole directory with an explanatory message instead.
        reason = "compat tests currently need to be run manually"
        py.test.skip(reason)
Python
# subprocess - Subprocesses with accessible I/O streams # # For more information about this module, see PEP 324. # # This module should remain compatible with Python 2.2, see PEP 291. # # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se> # # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license for licensing details. r"""subprocess - Subprocesses with accessible I/O streams This module allows you to spawn processes, connect to their input/output/error pipes, and obtain their return codes. This module intends to replace several other, older modules and functions, like: os.system os.spawn* os.popen* popen2.* commands.* Information about how the subprocess module can be used to replace these modules and functions can be found below. Using the subprocess module =========================== This module defines one class called Popen: class Popen(args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0): Arguments are: args should be a string, or a sequence of program arguments. The program to execute is normally the first item in the args sequence or string, but can be explicitly set by using the executable argument. On UNIX, with shell=False (default): In this case, the Popen class uses os.execvp() to execute the child program. args should normally be a sequence. A string will be treated as a sequence with the string as the only item (the program to execute). On UNIX, with shell=True: If args is a string, it specifies the command string to execute through the shell. If args is a sequence, the first item specifies the command string, and any additional items will be treated as additional shell arguments. On Windows: the Popen class uses CreateProcess() to execute the child program, which operates on strings. 
If args is a sequence, it will be converted to a string using the list2cmdline method. Please note that not all MS Windows applications interpret the command line the same way: The list2cmdline is designed for applications using the same rules as the MS C runtime. bufsize, if given, has the same meaning as the corresponding argument to the built-in open() function: 0 means unbuffered, 1 means line buffered, any other positive value means use a buffer of (approximately) that size. A negative bufsize means to use the system default, which usually means fully buffered. The default value for bufsize is 0 (unbuffered). stdin, stdout and stderr specify the executed programs' standard input, standard output and standard error file handles, respectively. Valid values are PIPE, an existing file descriptor (a positive integer), an existing file object, and None. PIPE indicates that a new pipe to the child should be created. With None, no redirection will occur; the child's file handles will be inherited from the parent. Additionally, stderr can be STDOUT, which indicates that the stderr data from the applications should be captured into the same file handle as for stdout. If preexec_fn is set to a callable object, this object will be called in the child process just before the child is executed. If close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. if shell is true, the specified command will be executed through the shell. If cwd is not None, the current directory will be changed to cwd before the child is executed. If env is not None, it defines the environment variables for the new process. If universal_newlines is true, the file objects stdout and stderr are opened as a text files, but lines may be terminated by any of '\n', the Unix end-of-line convention, '\r', the Macintosh convention or '\r\n', the Windows convention. All of these external representations are seen as '\n' by the Python program. 
Note: This feature is only available if Python is built with universal newline support (the default). Also, the newlines attribute of the file objects stdout, stdin and stderr are not updated by the communicate() method. The startupinfo and creationflags, if given, will be passed to the underlying CreateProcess() function. They can specify things such as appearance of the main window and priority for the new process. (Windows only) This module also defines two shortcut functions: call(*args, **kwargs): Run command with arguments. Wait for command to complete, then return the returncode attribute. The arguments are the same as for the Popen constructor. Example: retcode = call(["ls", "-l"]) Exceptions ---------- Exceptions raised in the child process, before the new program has started to execute, will be re-raised in the parent. Additionally, the exception object will have one extra attribute called 'child_traceback', which is a string containing traceback information from the childs point of view. The most common exception raised is OSError. This occurs, for example, when trying to execute a non-existent file. Applications should prepare for OSErrors. A ValueError will be raised if Popen is called with invalid arguments. Security -------- Unlike some other popen functions, this implementation will never call /bin/sh implicitly. This means that all characters, including shell metacharacters, can safely be passed to child processes. Popen objects ============= Instances of the Popen class have the following methods: poll() Check if child process has terminated. Returns returncode attribute. wait() Wait for child process to terminate. Returns returncode attribute. communicate(input=None) Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional stdin argument should be a string to be sent to the child process, or None, if no data should be sent to the child. 
communicate() returns a tuple (stdout, stderr). Note: The data read is buffered in memory, so do not use this method if the data size is large or unlimited. The following attributes are also available: stdin If the stdin argument is PIPE, this attribute is a file object that provides input to the child process. Otherwise, it is None. stdout If the stdout argument is PIPE, this attribute is a file object that provides output from the child process. Otherwise, it is None. stderr If the stderr argument is PIPE, this attribute is file object that provides error output from the child process. Otherwise, it is None. pid The process ID of the child process. returncode The child return code. A None value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (UNIX only). Replacing older functions with the subprocess module ==================================================== In this section, "a ==> b" means that b can be used as a replacement for a. Note: All functions in this section fail (more or less) silently if the executed program cannot be found; this module raises an OSError exception. In the following examples, we assume that the subprocess module is imported with "from subprocess import *". Replacing /bin/sh shell backquote --------------------------------- output=`mycmd myarg` ==> output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0] Replacing shell pipe line ------------------------- output=`dmesg | grep hda` ==> p1 = Popen(["dmesg"], stdout=PIPE) p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) output = p2.communicate()[0] Replacing os.system() --------------------- sts = os.system("mycmd" + " myarg") ==> p = Popen("mycmd" + " myarg", shell=True) sts = os.waitpid(p.pid, 0) Note: * Calling the program through the shell is usually not required. * It's easier to look at the returncode attribute than the exitstatus. 
A more real-world example would look like this: try: retcode = call("mycmd" + " myarg", shell=True) if retcode < 0: print >>sys.stderr, "Child was terminated by signal", -retcode else: print >>sys.stderr, "Child returned", retcode except OSError, e: print >>sys.stderr, "Execution failed:", e Replacing os.spawn* ------------------- P_NOWAIT example: pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg") ==> pid = Popen(["/bin/mycmd", "myarg"]).pid P_WAIT example: retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg") ==> retcode = call(["/bin/mycmd", "myarg"]) Vector example: os.spawnvp(os.P_NOWAIT, path, args) ==> Popen([path] + args[1:]) Environment example: os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env) ==> Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"}) Replacing os.popen* ------------------- pipe = os.popen(cmd, mode='r', bufsize) ==> pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout pipe = os.popen(cmd, mode='w', bufsize) ==> pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin (child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize) ==> p = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE, stdout=PIPE, close_fds=True) (child_stdin, child_stdout) = (p.stdin, p.stdout) (child_stdin, child_stdout, child_stderr) = os.popen3(cmd, mode, bufsize) ==> p = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) (child_stdin, child_stdout, child_stderr) = (p.stdin, p.stdout, p.stderr) (child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize) ==> p = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) (child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout) Replacing popen2.* ------------------ Note: If the cmd argument to popen2 functions is a string, the command is executed through /bin/sh. If it is a list, the command is directly executed. 
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode) ==> p = Popen(["somestring"], shell=True, bufsize=bufsize stdin=PIPE, stdout=PIPE, close_fds=True) (child_stdout, child_stdin) = (p.stdout, p.stdin) (child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode) ==> p = Popen(["mycmd", "myarg"], bufsize=bufsize, stdin=PIPE, stdout=PIPE, close_fds=True) (child_stdout, child_stdin) = (p.stdout, p.stdin) The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen, except that: * subprocess.Popen raises an exception if the execution fails * the capturestderr argument is replaced with the stderr argument. * stdin=PIPE and stdout=PIPE must be specified. * popen2 closes all filedescriptors by default, but you have to specify close_fds=True with subprocess.Popen. """ import sys mswindows = (sys.platform == "win32") import os import types import traceback if mswindows: import threading import msvcrt if 0: # <-- change this to use pywin32 instead of the _subprocess driver import pywintypes from win32api import GetStdHandle, STD_INPUT_HANDLE, \ STD_OUTPUT_HANDLE, STD_ERROR_HANDLE from win32api import GetCurrentProcess, DuplicateHandle, \ GetModuleFileName, GetVersion from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE from win32pipe import CreatePipe from win32process import CreateProcess, STARTUPINFO, \ GetExitCodeProcess, STARTF_USESTDHANDLES, \ STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0 else: from _subprocess import * class STARTUPINFO: dwFlags = 0 hStdInput = None hStdOutput = None hStdError = None wShowWindow = 0 class pywintypes: error = IOError else: import select import errno import fcntl import pickle __all__ = ["Popen", "PIPE", "STDOUT", "call"] try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # True/False does not exist on 2.2.0 try: False except NameError: False = 0 True = 1 _active = [] def _cleanup(): for inst in _active[:]: inst.poll() 
# Sentinel values for the stdin/stdout/stderr arguments of Popen:
# PIPE requests a new pipe to the child; STDOUT (stderr only) merges
# the child's stderr into its stdout stream.
PIPE = -1
STDOUT = -2


def call(*args, **kwargs):
    """Run command with arguments.  Wait for command to complete, then
    return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    return Popen(*args, **kwargs).wait()


def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line string, using
    the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """

    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        # bs_buf accumulates a run of backslashes whose meaning depends
        # on the character that follows (rules 4/5 above).
        bs_buf = []

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        # NOTE(review): an empty argument is neither quoted nor emitted
        # here, so it silently disappears from the command line; later
        # stdlib versions add "or not arg" to force quoting.
        needquote = (" " in arg) or ("\t" in arg)
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backspaces.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backspaces, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            # Trailing backslashes are doubled before the closing quote
            # (rule 5): bs_buf was already emitted once just above.
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)


class Popen(object):
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0):
        """Create new Popen instance."""
        # Reap any finished children first so _active does not grow
        # without bound.
        _cleanup()

        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")

        # Reject arguments that only make sense on the other platform.
        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds:
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms")
        else:
            # POSIX
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")

        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines

        # Input and output objects.  The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        self._execute_child(args, executable, preexec_fn, close_fds,
                            cwd, env, universal_newlines,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite)

        # Wrap the parent ends of the pipes in file objects.  'rU'
        # enables universal-newline decoding on reads.
        # NOTE(review): truthiness tests would mis-handle fd 0; later
        # stdlib versions compare against None instead.
        if p2cwrite:
            self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
        if c2pread:
            if universal_newlines:
                self.stdout = os.fdopen(c2pread, 'rU', bufsize)
            else:
                self.stdout = os.fdopen(c2pread, 'rb', bufsize)
        if errread:
            if universal_newlines:
                self.stderr = os.fdopen(errread, 'rU', bufsize)
            else:
                self.stderr = os.fdopen(errread, 'rb', bufsize)

        _active.append(self)

    def _translate_newlines(self, data):
        """Collapse \\r\\n and bare \\r into \\n (universal newlines)."""
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        return data

    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tupel with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin == None and stdout == None and stderr == None:
                # Nothing redirected: let the child inherit everything.
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin == None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                p2cwrite = p2cwrite.Detach()
                p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
            elif type(stdin) == types.IntType:
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            # Child-side handles must be inheritable across CreateProcess.
            p2cread = self._make_inheritable(p2cread)

            if stdout == None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                c2pread = c2pread.Detach()
                c2pread = msvcrt.open_osfhandle(c2pread, 0)
            elif type(stdout) == types.IntType:
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr == None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                errread = errread.Detach()
                errread = msvcrt.open_osfhandle(errread, 0)
            elif stderr == STDOUT:
                # Merge stderr into the child's stdout handle.
                errwrite = c2pwrite
            elif type(stderr) == types.IntType:
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(), handle,
                                   GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)

        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""

            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo == None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                # Run through the command interpreter, hiding its window.
                startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = comspec + " /c " + args
                if (GetVersion() >= 0x80000000L or
                    os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                         # no special security
                                         None, None,
                                         # must inherit handles to pass std
                                         # handles
                                         1,
                                         creationflags,
                                         env,
                                         cwd,
                                         startupinfo)
            except pywintypes.error, e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or simliar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)

            # Retain the process handle, but close the thread handle
            self._handle = hp
            self.pid = pid
            ht.Close()

            # Child is launched. Close the parent's copy of those pipe
            # handles that only the child should have open.  You need
            # to make sure that no handles to the write end of the
            # output pipe are maintained in this process or else the
            # pipe will not close when the child process exits and the
            # ReadFile will hang.
            if p2cread != None:
                p2cread.Close()
            if c2pwrite != None:
                c2pwrite.Close()
            if errwrite != None:
                errwrite.Close()

        def poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode == None:
                # Zero timeout: probe without blocking.
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    _active.remove(self)
            return self.returncode

        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode == None:
                obj = WaitForSingleObject(self._handle, INFINITE)
                self.returncode = GetExitCodeProcess(self._handle)
                _active.remove(self)
            return self.returncode

        def _readerthread(self, fh, buffer):
            # Helper run on a daemon thread: drain fh to EOF and deposit
            # the result into the shared one-element list 'buffer'.
            buffer.append(fh.read())

        def communicate(self, input=None):
            """Interact with process: Send data to stdin.  Read data from
            stdout and stderr, until end-of-file is reached.  Wait for
            process to terminate.  The optional input argument should be a
            string to be sent to the child process, or None, if no data
            should be sent to the child.

            communicate() returns a tuple (stdout, stderr)."""
            stdout = None # Return
            stderr = None # Return

            # Windows has no select() on pipes, so each output stream is
            # drained on its own thread to avoid deadlocking on full pipes.
            if self.stdout:
                stdout = []
                stdout_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stdout, stdout))
                stdout_thread.setDaemon(True)
                stdout_thread.start()
            if self.stderr:
                stderr = []
                stderr_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stderr, stderr))
                stderr_thread.setDaemon(True)
                stderr_thread.start()

            if self.stdin:
                if input != None:
                    self.stdin.write(input)
                self.stdin.close()

            if self.stdout:
                stdout_thread.join()
            if self.stderr:
                stderr_thread.join()

            # All data exchanged.  Translate lists into strings.
            if stdout != None:
                stdout = stdout[0]
            if stderr != None:
                stderr = stderr[0]

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(open, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)

    else:
        #
        # POSIX methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tupel with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin == None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = os.pipe()
            elif type(stdin) == types.IntType:
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout == None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = os.pipe()
            elif type(stdout) == types.IntType:
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr == None:
                pass
            elif stderr == PIPE:
                errread, errwrite = os.pipe()
            elif stderr == STDOUT:
                # Merge stderr into the child's stdout descriptor.
                errwrite = c2pwrite
            elif type(stderr) == types.IntType:
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _set_cloexec_flag(self, fd):
            """Mark fd close-on-exec so it vanishes in the exec'd child."""
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1

            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)

        def _close_fds(self, but):
            """Close every fd from 3 up to MAXFD except 'but'."""
            for i in xrange(3, MAXFD):
                if i == but:
                    continue
                try:
                    os.close(i)
                except:
                    pass

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""

            if isinstance(args, types.StringTypes):
                args = [args]

            if shell:
                args = ["/bin/sh", "-c"] + args

            if executable == None:
                executable = args[0]

            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            # (The errpipe is close-on-exec, so a successful exec closes
            # it and the parent reads EOF; on failure the child writes a
            # pickled exception through it.)
            errpipe_read, errpipe_write = os.pipe()
            self._set_cloexec_flag(errpipe_write)

            self.pid = os.fork()
            if self.pid == 0:
                # Child
                try:
                    # Close parent's pipe ends
                    if p2cwrite:
                        os.close(p2cwrite)
                    if c2pread:
                        os.close(c2pread)
                    if errread:
                        os.close(errread)
                    os.close(errpipe_read)

                    # Dup fds for child
                    if p2cread:
                        os.dup2(p2cread, 0)
                    if c2pwrite:
                        os.dup2(c2pwrite, 1)
                    if errwrite:
                        os.dup2(errwrite, 2)

                    # Close pipe fds.  Make sure we doesn't close the same
                    # fd more than once.
                    if p2cread:
                        os.close(p2cread)
                    if c2pwrite and c2pwrite not in (p2cread,):
                        os.close(c2pwrite)
                    if errwrite and errwrite not in (p2cread, c2pwrite):
                        os.close(errwrite)

                    # Close all other fds, if asked for
                    if close_fds:
                        self._close_fds(but=errpipe_write)

                    if cwd != None:
                        os.chdir(cwd)

                    if preexec_fn:
                        apply(preexec_fn)

                    if env == None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)

                except:
                    exc_type, exc_value, tb = sys.exc_info()
                    # Save the traceback and attach it to the exception object
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value,
                                                           tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))

                # This exitcode won't be reported to applications, so it
                # really doesn't matter what we return.
                os._exit(255)

            # Parent
            os.close(errpipe_write)
            if p2cread and p2cwrite:
                os.close(p2cread)
            if c2pwrite and c2pread:
                os.close(c2pwrite)
            if errwrite and errread:
                os.close(errwrite)

            # Wait for exec to fail or succeed; possibly raising exception
            data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
            os.close(errpipe_read)
            if data != "":
                # Exec failed: reap the child and re-raise its exception
                # (with .child_traceback attached) in the parent.
                os.waitpid(self.pid, 0)
                child_exception = pickle.loads(data)
                raise child_exception

        def _handle_exitstatus(self, sts):
            """Decode a waitpid() status into self.returncode.

            Negative returncode -N means the child died on signal N."""
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")

            _active.remove(self)

        def poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode == None:
                try:
                    # WNOHANG: return immediately if the child is still
                    # running (pid comes back 0 in that case).
                    pid, sts = os.waitpid(self.pid, os.WNOHANG)
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                except os.error:
                    pass
            return self.returncode

        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode == None:
                pid, sts = os.waitpid(self.pid, 0)
                self._handle_exitstatus(sts)
            return self.returncode

        def communicate(self, input=None):
            """Interact with process: Send data to stdin.  Read data from
            stdout and stderr, until end-of-file is reached.  Wait for
            process to terminate.  The optional input argument should be a
            string to be sent to the child process, or None, if no data
            should be sent to the child.

            communicate() returns a tuple (stdout, stderr)."""
            read_set = []
            write_set = []
            stdout = None # Return
            stderr = None # Return

            if self.stdin:
                # Flush stdio buffer.  This might block, if the user has
                # been writing to .stdin in an uncontrolled fashion.
                self.stdin.flush()
                if input:
                    write_set.append(self.stdin)
                else:
                    self.stdin.close()
            if self.stdout:
                read_set.append(self.stdout)
                stdout = []
            if self.stderr:
                read_set.append(self.stderr)
                stderr = []

            # Multiplex reads and writes with select() so a full pipe on
            # one stream cannot deadlock the others.
            while read_set or write_set:
                rlist, wlist, xlist = select.select(read_set, write_set, [])

                if self.stdin in wlist:
                    # When select has indicated that the file is writable,
                    # we can write up to PIPE_BUF bytes without risk
                    # blocking.  POSIX defines PIPE_BUF >= 512
                    bytes_written = os.write(self.stdin.fileno(), input[:512])
                    input = input[bytes_written:]
                    if not input:
                        self.stdin.close()
                        write_set.remove(self.stdin)

                if self.stdout in rlist:
                    data = os.read(self.stdout.fileno(), 1024)
                    if data == "":
                        self.stdout.close()
                        read_set.remove(self.stdout)
                    stdout.append(data)

                if self.stderr in rlist:
                    data = os.read(self.stderr.fileno(), 1024)
                    if data == "":
                        self.stderr.close()
                        read_set.remove(self.stderr)
                    stderr.append(data)

            # All data exchanged.  Translate lists into strings.
            if stdout != None:
                stdout = ''.join(stdout)
            if stderr != None:
                stderr = ''.join(stderr)

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(open, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)


def _demo_posix():
    # Manual smoke tests for the POSIX code path (run via __main__).
    #
    # Example 1: Simple redirection: Get process list
    #
    plist = Popen(["ps"], stdout=PIPE).communicate()[0]
    print "Process list:"
    print plist

    #
    # Example 2: Change uid before executing child
    #
    if os.getuid() == 0:
        p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
        p.wait()

    #
    # Example 3: Connecting several subprocesses
    #
    print "Looking for 'hda'..."
    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])

    #
    # Example 4: Catch execution error
    #
    print
    print "Trying a weird file..."
    try:
        print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError, e:
        if e.errno == errno.ENOENT:
            print "The file didn't exist.  I thought so..."
            print "Child traceback:"
            print e.child_traceback
        else:
            print "Error", e.errno
    else:
        print >>sys.stderr, "Gosh.  No error."


def _demo_windows():
    # Manual smoke tests for the Windows code path (run via __main__).
    #
    # Example 1: Connecting several subprocesses
    #
    print "Looking for 'PROMPT' in set output..."
    p1 = Popen("set", stdout=PIPE, shell=True)
    p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])

    #
    # Example 2: Simple execution of program
    #
    print "Executing calc..."
    p = Popen("calc")
    p.wait()


if __name__ == "__main__":
    if mswindows:
        _demo_windows()
    else:
        _demo_posix()
Python
"""optparse - a powerful, extensible, and easy-to-use option parser. By Greg Ward <gward@python.net> Originally distributed as Optik; see http://optik.sourceforge.net/ . If you have problems with this module, please do not file bugs, patches, or feature requests with Python; instead, use Optik's SourceForge project page: http://sourceforge.net/projects/optik For support, use the optik-users@lists.sourceforge.net mailing list (http://lists.sourceforge.net/lists/listinfo/optik-users). """ # Python developers: please do not make changes to this file, since # it is automatically generated from the Optik source code. __version__ = "1.5a2" __all__ = ['Option', 'SUPPRESS_HELP', 'SUPPRESS_USAGE', 'Values', 'OptionContainer', 'OptionGroup', 'OptionParser', 'HelpFormatter', 'IndentedHelpFormatter', 'TitledHelpFormatter', 'OptParseError', 'OptionError', 'OptionConflictError', 'OptionValueError', 'BadOptionError'] __copyright__ = """ Copyright (c) 2001-2004 Gregory P. Ward. All rights reserved. Copyright (c) 2002-2004 Python Software Foundation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import sys, os import types import textwrap from gettext import gettext as _ def _repr(self): return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) # This file was generated from: # Id: option_parser.py 421 2004-10-26 00:45:16Z greg # Id: option.py 422 2004-10-26 00:53:47Z greg # Id: help.py 367 2004-07-24 23:21:21Z gward # Id: errors.py 367 2004-07-24 23:21:21Z gward class OptParseError (Exception): def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class OptionError (OptParseError): """ Raised if an Option instance is created with invalid or inconsistent arguments. """ def __init__(self, msg, option): self.msg = msg self.option_id = str(option) def __str__(self): if self.option_id: return "option %s: %s" % (self.option_id, self.msg) else: return self.msg class OptionConflictError (OptionError): """ Raised if conflicting options are added to an OptionParser. """ class OptionValueError (OptParseError): """ Raised if an invalid option value is encountered on the command line. """ class BadOptionError (OptParseError): """ Raised if an invalid or ambiguous option is seen on the command-line. """ class HelpFormatter: """ Abstract base class for formatting option help. OptionParser instances should use one of the HelpFormatter subclasses for formatting help; by default IndentedHelpFormatter is used. 
Instance attributes: parser : OptionParser the controlling OptionParser instance indent_increment : int the number of columns to indent per nesting level max_help_position : int the maximum starting column for option help text help_position : int the calculated starting column for option help text; initially the same as the maximum width : int total number of columns for output (pass None to constructor for this value to be taken from the $COLUMNS environment variable) level : int current indentation level current_indent : int current indentation level (in columns) help_width : int number of columns available for option help text (calculated) default_tag : str text to replace with each option's default value, "%default" by default. Set to false value to disable default value expansion. option_strings : { Option : str } maps Option instances to the snippet of help text explaining the syntax of that option, e.g. "-h, --help" or "-fFILE, --file=FILE" _short_opt_fmt : str format string controlling how short options with values are printed in help text. Must be either "%s%s" ("-fFILE") or "%s %s" ("-f FILE"), because those are the two syntaxes that Optik supports. _long_opt_fmt : str similar but for long options; must be either "%s %s" ("--file FILE") or "%s=%s" ("--file=FILE"). 
""" NO_DEFAULT_VALUE = "none" def __init__(self, indent_increment, max_help_position, width, short_first): self.parser = None self.indent_increment = indent_increment self.help_position = self.max_help_position = max_help_position if width is None: try: width = int(os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self.width = width self.current_indent = 0 self.level = 0 self.help_width = None # computed later self.short_first = short_first self.default_tag = "%default" self.option_strings = {} self._short_opt_fmt = "%s %s" self._long_opt_fmt = "%s=%s" def set_parser(self, parser): self.parser = parser def set_short_opt_delimiter(self, delim): if delim not in ("", " "): raise ValueError( "invalid metavar delimiter for short options: %r" % delim) self._short_opt_fmt = "%s" + delim + "%s" def set_long_opt_delimiter(self, delim): if delim not in ("=", " "): raise ValueError( "invalid metavar delimiter for long options: %r" % delim) self._long_opt_fmt = "%s" + delim + "%s" def indent(self): self.current_indent += self.indent_increment self.level += 1 def dedent(self): self.current_indent -= self.indent_increment assert self.current_indent >= 0, "Indent decreased below 0." 
self.level -= 1 def format_usage(self, usage): raise NotImplementedError, "subclasses must implement" def format_heading(self, heading): raise NotImplementedError, "subclasses must implement" def format_description(self, description): if not description: return "" desc_width = self.width - self.current_indent indent = " "*self.current_indent return textwrap.fill(description, desc_width, initial_indent=indent, subsequent_indent=indent) + "\n" def expand_default(self, option): if self.parser is None or not self.default_tag: return option.help default_value = self.parser.defaults.get(option.dest) if default_value is NO_DEFAULT or default_value is None: default_value = self.NO_DEFAULT_VALUE return option.help.replace(self.default_tag, str(default_value)) def format_option(self, option): # The help for each option consists of two parts: # * the opt strings and metavars # eg. ("-x", or "-fFILENAME, --file=FILENAME") # * the user-supplied help string # eg. ("turn on expert mode", "read data from FILENAME") # # If possible, we write both of these on the same line: # -x turn on expert mode # # But if the opt string list is too long, we put the help # string on a second line, indented to the same column it would # start in if it fit on the first line. 
# -fFILENAME, --file=FILENAME # read data from FILENAME result = [] opts = self.option_strings[option] opt_width = self.help_position - self.current_indent - 2 if len(opts) > opt_width: opts = "%*s%s\n" % (self.current_indent, "", opts) indent_first = self.help_position else: # start help on same line as opts opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) indent_first = 0 result.append(opts) if option.help: help_text = self.expand_default(option) help_lines = textwrap.wrap(help_text, self.help_width) result.append("%*s%s\n" % (indent_first, "", help_lines[0])) result.extend(["%*s%s\n" % (self.help_position, "", line) for line in help_lines[1:]]) elif opts[-1] != "\n": result.append("\n") return "".join(result) def store_option_strings(self, parser): self.indent() max_len = 0 for opt in parser.option_list: strings = self.format_option_strings(opt) self.option_strings[opt] = strings max_len = max(max_len, len(strings) + self.current_indent) self.indent() for group in parser.option_groups: for opt in group.option_list: strings = self.format_option_strings(opt) self.option_strings[opt] = strings max_len = max(max_len, len(strings) + self.current_indent) self.dedent() self.dedent() self.help_position = min(max_len + 2, self.max_help_position) self.help_width = self.width - self.help_position def format_option_strings(self, option): """Return a comma-separated list of option strings & metavariables.""" if option.takes_value(): metavar = option.metavar or option.dest.upper() short_opts = [self._short_opt_fmt % (sopt, metavar) for sopt in option._short_opts] long_opts = [self._long_opt_fmt % (lopt, metavar) for lopt in option._long_opts] else: short_opts = option._short_opts long_opts = option._long_opts if self.short_first: opts = short_opts + long_opts else: opts = long_opts + short_opts return ", ".join(opts) class IndentedHelpFormatter (HelpFormatter): """Format help with indented section bodies. 
""" def __init__(self, indent_increment=2, max_help_position=24, width=None, short_first=1): HelpFormatter.__init__( self, indent_increment, max_help_position, width, short_first) def format_usage(self, usage): return _("usage: %s\n") % usage def format_heading(self, heading): return "%*s%s:\n" % (self.current_indent, "", heading) class TitledHelpFormatter (HelpFormatter): """Format help with underlined section headers. """ def __init__(self, indent_increment=0, max_help_position=24, width=None, short_first=0): HelpFormatter.__init__ ( self, indent_increment, max_help_position, width, short_first) def format_usage(self, usage): return "%s %s\n" % (self.format_heading(_("Usage")), usage) def format_heading(self, heading): return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) _builtin_cvt = { "int" : (int, _("integer")), "long" : (long, _("long integer")), "float" : (float, _("floating-point")), "complex" : (complex, _("complex")) } def check_builtin(option, opt, value): (cvt, what) = _builtin_cvt[option.type] try: return cvt(value) except ValueError: raise OptionValueError( _("option %s: invalid %s value: %r") % (opt, what, value)) def check_choice(option, opt, value): if value in option.choices: return value else: choices = ", ".join(map(repr, option.choices)) raise OptionValueError( _("option %s: invalid choice: %r (choose from %s)") % (opt, value, choices)) # Not supplying a default is different from a default of None, # so we need an explicit "not supplied" value. NO_DEFAULT = ("NO", "DEFAULT") class Option: """ Instance attributes: _short_opts : [string] _long_opts : [string] action : string type : string dest : string default : any nargs : int const : any choices : [string] callback : function callback_args : (any*) callback_kwargs : { string : any } help : string metavar : string """ # The list of instance attributes that may be set through # keyword args to the constructor. 
ATTRS = ['action', 'type', 'dest', 'default', 'nargs', 'const', 'choices', 'callback', 'callback_args', 'callback_kwargs', 'help', 'metavar'] # The set of actions allowed by option parsers. Explicitly listed # here so the constructor can validate its arguments. ACTIONS = ("store", "store_const", "store_true", "store_false", "append", "count", "callback", "help", "version") # The set of actions that involve storing a value somewhere; # also listed just for constructor argument validation. (If # the action is one of these, there must be a destination.) STORE_ACTIONS = ("store", "store_const", "store_true", "store_false", "append", "count") # The set of actions for which it makes sense to supply a value # type, ie. which may consume an argument from the command line. TYPED_ACTIONS = ("store", "append", "callback") # The set of actions which *require* a value type, ie. that # always consume an argument from the command line. ALWAYS_TYPED_ACTIONS = ("store", "append") # The set of known types for option parsers. Again, listed here for # constructor argument validation. TYPES = ("string", "int", "long", "float", "complex", "choice") # Dictionary of argument checking functions, which convert and # validate option arguments according to the option type. # # Signature of checking functions is: # check(option : Option, opt : string, value : string) -> any # where # option is the Option instance calling the checker # opt is the actual option seen on the command-line # (eg. "-a", "--file") # value is the option argument seen on the command-line # # The return value should be in the appropriate Python type # for option.type -- eg. an integer if option.type == "int". # # If no checker is defined for a type, arguments will be # unchecked and remain strings. 
TYPE_CHECKER = { "int" : check_builtin, "long" : check_builtin, "float" : check_builtin, "complex": check_builtin, "choice" : check_choice, } # CHECK_METHODS is a list of unbound method objects; they are called # by the constructor, in order, after all attributes are # initialized. The list is created and filled in later, after all # the methods are actually defined. (I just put it here because I # like to define and document all class attributes in the same # place.) Subclasses that add another _check_*() method should # define their own CHECK_METHODS list that adds their check method # to those from this class. CHECK_METHODS = None # -- Constructor/initialization methods ---------------------------- def __init__(self, *opts, **attrs): # Set _short_opts, _long_opts attrs from 'opts' tuple. # Have to be set now, in case no option strings are supplied. self._short_opts = [] self._long_opts = [] opts = self._check_opt_strings(opts) self._set_opt_strings(opts) # Set all other attrs (action, type, etc.) from 'attrs' dict self._set_attrs(attrs) # Check all the attributes we just set. There are lots of # complicated interdependencies, but luckily they can be farmed # out to the _check_*() methods listed in CHECK_METHODS -- which # could be handy for subclasses! The one thing these all share # is that they raise OptionError if they discover a problem. for checker in self.CHECK_METHODS: checker(self) def _check_opt_strings(self, opts): # Filter out None because early versions of Optik had exactly # one short option and one long option, either of which # could be None. 
        opts = filter(None, opts)
        if not opts:
            raise TypeError("at least one option string must be supplied")
        return opts

    def _set_opt_strings(self, opts):
        # Classify each option string as short ("-x") or long ("--xyz"),
        # rejecting anything malformed.
        for opt in opts:
            if len(opt) < 2:
                raise OptionError(
                    "invalid option string %r: "
                    "must be at least two characters long" % opt, self)
            elif len(opt) == 2:
                if not (opt[0] == "-" and opt[1] != "-"):
                    raise OptionError(
                        "invalid short option string %r: "
                        "must be of the form -x, (x any non-dash char)" % opt,
                        self)
                self._short_opts.append(opt)
            else:
                if not (opt[0:2] == "--" and opt[2] != "-"):
                    raise OptionError(
                        "invalid long option string %r: "
                        "must start with --, followed by non-dash" % opt,
                        self)
                self._long_opts.append(opt)

    def _set_attrs(self, attrs):
        for attr in self.ATTRS:
            if attrs.has_key(attr):
                setattr(self, attr, attrs[attr])
                del attrs[attr]
            else:
                if attr == 'default':
                    setattr(self, attr, NO_DEFAULT)
                else:
                    setattr(self, attr, None)
        if attrs:
            # Any keywords left over were not in ATTRS: report them,
            # sorted so the error message is deterministic.
            attrs = attrs.keys()
            attrs.sort()
            raise OptionError(
                "invalid keyword arguments: %s" % ", ".join(attrs),
                self)


    # -- Constructor validation methods --------------------------------

    def _check_action(self):
        if self.action is None:
            self.action = "store"
        elif self.action not in self.ACTIONS:
            raise OptionError("invalid action: %r" % self.action, self)

    def _check_type(self):
        if self.type is None:
            if self.action in self.ALWAYS_TYPED_ACTIONS:
                if self.choices is not None:
                    # The "choices" attribute implies "choice" type.
                    self.type = "choice"
                else:
                    # No type given?  "string" is the most sensible default.
                    self.type = "string"
        else:
            # Allow type objects as an alternative to their names.
            if type(self.type) is type:
                self.type = self.type.__name__
            if self.type == "str":
                self.type = "string"

            if self.type not in self.TYPES:
                raise OptionError("invalid option type: %r" % self.type, self)
            if self.action not in self.TYPED_ACTIONS:
                raise OptionError(
                    "must not supply a type for action %r" % self.action, self)

    def _check_choice(self):
        if self.type == "choice":
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            # types.TupleType/ListType are the Python 2 spellings of
            # tuple/list.
            elif type(self.choices) not in (types.TupleType, types.ListType):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)

    def _check_dest(self):
        # No destination given, and we need one for this action.  The
        # self.type check is for callbacks that take a value.
        takes_value = (self.action in self.STORE_ACTIONS or
                       self.type is not None)
        if self.dest is None and takes_value:
            # Glean a destination from the first long option string,
            # or from the first short option string if no long options.
            if self._long_opts:
                # eg.
"--foo-bar" -> "foo_bar" self.dest = self._long_opts[0][2:].replace('-', '_') else: self.dest = self._short_opts[0][1] def _check_const(self): if self.action != "store_const" and self.const is not None: raise OptionError( "'const' must not be supplied for action %r" % self.action, self) def _check_nargs(self): if self.action in self.TYPED_ACTIONS: if self.nargs is None: self.nargs = 1 elif self.nargs is not None: raise OptionError( "'nargs' must not be supplied for action %r" % self.action, self) def _check_callback(self): if self.action == "callback": if not callable(self.callback): raise OptionError( "callback not callable: %r" % self.callback, self) if (self.callback_args is not None and type(self.callback_args) is not types.TupleType): raise OptionError( "callback_args, if supplied, must be a tuple: not %r" % self.callback_args, self) if (self.callback_kwargs is not None and type(self.callback_kwargs) is not types.DictType): raise OptionError( "callback_kwargs, if supplied, must be a dict: not %r" % self.callback_kwargs, self) else: if self.callback is not None: raise OptionError( "callback supplied (%r) for non-callback option" % self.callback, self) if self.callback_args is not None: raise OptionError( "callback_args supplied for non-callback option", self) if self.callback_kwargs is not None: raise OptionError( "callback_kwargs supplied for non-callback option", self) CHECK_METHODS = [_check_action, _check_type, _check_choice, _check_dest, _check_const, _check_nargs, _check_callback] # -- Miscellaneous methods ----------------------------------------- def __str__(self): return "/".join(self._short_opts + self._long_opts) __repr__ = _repr def takes_value(self): return self.type is not None def get_opt_string(self): if self._long_opts: return self._long_opts[0] else: return self._short_opts[0] # -- Processing methods -------------------------------------------- def check_value(self, opt, value): checker = self.TYPE_CHECKER.get(self.type) if checker is None: 
            return value
        else:
            return checker(self, opt, value)

    def convert_value(self, opt, value):
        if value is not None:
            if self.nargs == 1:
                return self.check_value(opt, value)
            else:
                return tuple([self.check_value(opt, v) for v in value])

    def process(self, opt, value, values, parser):

        # First, convert the value(s) to the right type.  Howl if any
        # value(s) are bogus.
        value = self.convert_value(opt, value)

        # And then take whatever action is expected of us.
        # This is a separate method to make life easier for
        # subclasses to add new actions.
        return self.take_action(
            self.action, self.dest, opt, value, values, parser)

    def take_action(self, action, dest, opt, value, values, parser):
        if action == "store":
            setattr(values, dest, value)
        elif action == "store_const":
            setattr(values, dest, self.const)
        elif action == "store_true":
            setattr(values, dest, True)
        elif action == "store_false":
            setattr(values, dest, False)
        elif action == "append":
            values.ensure_value(dest, []).append(value)
        elif action == "count":
            setattr(values, dest, values.ensure_value(dest, 0) + 1)
        elif action == "callback":
            args = self.callback_args or ()
            kwargs = self.callback_kwargs or {}
            self.callback(self, opt, value, parser, *args, **kwargs)
        elif action == "help":
            parser.print_help()
            parser.exit()
        elif action == "version":
            parser.print_version()
            parser.exit()
        else:
            raise RuntimeError, "unknown action %r" % self.action

        # NOTE(review): always returns 1; the return value is unused by
        # the parsing code visible in this module.
        return 1

# class Option


# Sentinel values: format_option_help()/set_usage() compare against
# these to suppress an option's help text or the usage line entirely.
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"

# For compatibility with Python 2.2
try:
    True, False
except NameError:
    (True, False) = (1, 0)
try:
    basestring
except NameError:
    basestring = (str, unicode)


class Values:

    def __init__(self, defaults=None):
        if defaults:
            for (attr, val) in defaults.items():
                setattr(self, attr, val)

    def __str__(self):
        return str(self.__dict__)

    __repr__ = _repr

    def __eq__(self, other):
        if isinstance(other, Values):
            return self.__dict__ == other.__dict__
        elif isinstance(other, dict):
            # Allow direct comparison against a plain dict of values.
            return self.__dict__ == other
        else:
            return False
    def __ne__(self, other):
        return not (self == other)

    def _update_careful(self, dict):
        """
        Update the option values from an arbitrary dictionary, but only
        use keys from dict that already have a corresponding attribute
        in self.  Any keys in dict without a corresponding attribute
        are silently ignored.
        """
        for attr in dir(self):
            if dict.has_key(attr):
                dval = dict[attr]
                if dval is not None:
                    setattr(self, attr, dval)

    def _update_loose(self, dict):
        """
        Update the option values from an arbitrary dictionary,
        using all keys from the dictionary regardless of whether
        they have a corresponding attribute in self or not.
        """
        self.__dict__.update(dict)

    def _update(self, dict, mode):
        if mode == "careful":
            self._update_careful(dict)
        elif mode == "loose":
            self._update_loose(dict)
        else:
            raise ValueError, "invalid update mode: %r" % mode

    def read_module(self, modname, mode="careful"):
        __import__(modname)
        mod = sys.modules[modname]
        self._update(vars(mod), mode)

    def read_file(self, filename, mode="careful"):
        # NOTE(review): execfile runs arbitrary code from 'filename';
        # only use on trusted configuration files.  ('vars' shadows the
        # builtin locally -- kept for byte-compatibility.)
        vars = {}
        execfile(filename, vars)
        self._update(vars, mode)

    def ensure_value(self, attr, value):
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)


class OptionContainer:

    """
    Abstract base class.

    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      option_list : [Option]
        the list of Option objects contained by this OptionContainer
      _short_opt : { string : Option }
        dictionary mapping short option strings, eg. "-f" or "-X",
        to the Option instances that implement them.  If an Option
        has multiple short option strings, it will appears in this
        dictionary multiple times. [1]
      _long_opt : { string : Option }
        dictionary mapping long option strings, eg. "--file" or
        "--exclude", to the Option instances that implement them.
        Again, a given Option can occur multiple times in this
        dictionary.
        [1]
      defaults : { string : any }
        dictionary mapping option destination names to default
        values for each destination [1]

      [1] These mappings are common to (shared by) all components
          of the controlling OptionParser, where they are initially
          created.

    """

    def __init__(self, option_class, conflict_handler, description):
        # Initialize the option list and related data structures.
        # This method must be provided by subclasses, and it must
        # initialize at least the following instance attributes:
        # option_list, _short_opt, _long_opt, defaults.
        # (OptionParser creates fresh mappings; OptionGroup shares its
        # parser's.)
        self._create_option_list()

        self.option_class = option_class
        self.set_conflict_handler(conflict_handler)
        self.set_description(description)

    def _create_option_mappings(self):
        # For use by OptionParser constructor -- create the master
        # option mappings used by this OptionParser and all
        # OptionGroups that it owns.
        self._short_opt = {}            # single letter -> Option instance
        self._long_opt = {}             # long option -> Option instance
        self.defaults = {}              # maps option dest -> default value


    def _share_option_mappings(self, parser):
        # For use by OptionGroup constructor -- use shared option
        # mappings from the OptionParser that owns this OptionGroup.
        self._short_opt = parser._short_opt
        self._long_opt = parser._long_opt
        self.defaults = parser.defaults

    def set_conflict_handler(self, handler):
        if handler not in ("error", "resolve"):
            raise ValueError, "invalid conflict_resolution value %r" % handler
        self.conflict_handler = handler

    def set_description(self, description):
        self.description = description

    def get_description(self):
        return self.description


    # -- Option-adding methods -----------------------------------------

    def _check_conflict(self, option):
        # Collect (opt_string, existing_option) pairs that clash with
        # the new option's strings.
        conflict_opts = []
        for opt in option._short_opts:
            if self._short_opt.has_key(opt):
                conflict_opts.append((opt, self._short_opt[opt]))
        for opt in option._long_opts:
            if self._long_opt.has_key(opt):
                conflict_opts.append((opt, self._long_opt[opt]))

        if conflict_opts:
            handler = self.conflict_handler
            if handler == "error":
                raise OptionConflictError(
                    "conflicting option string(s): %s"
                    % ", ".join([co[0] for co in conflict_opts]),
                    option)
            elif handler == "resolve":
                # "resolve": the new option silently takes over the
                # conflicting strings from the old one; an old option
                # left with no strings at all is removed entirely.
                for (opt, c_option) in conflict_opts:
                    if opt.startswith("--"):
                        c_option._long_opts.remove(opt)
                        del self._long_opt[opt]
                    else:
                        c_option._short_opts.remove(opt)
                        del self._short_opt[opt]
                    if not (c_option._short_opts or c_option._long_opts):
                        c_option.container.option_list.remove(c_option)

    def add_option(self, *args, **kwargs):
        """add_option(Option)
           add_option(opt_str, ..., kwarg=val, ...)
""" if type(args[0]) is types.StringType: option = self.option_class(*args, **kwargs) elif len(args) == 1 and not kwargs: option = args[0] if not isinstance(option, Option): raise TypeError, "not an Option instance: %r" % option else: raise TypeError, "invalid arguments" self._check_conflict(option) self.option_list.append(option) option.container = self for opt in option._short_opts: self._short_opt[opt] = option for opt in option._long_opts: self._long_opt[opt] = option if option.dest is not None: # option has a dest, we need a default if option.default is not NO_DEFAULT: self.defaults[option.dest] = option.default elif not self.defaults.has_key(option.dest): self.defaults[option.dest] = None return option def add_options(self, option_list): for option in option_list: self.add_option(option) # -- Option query/removal methods ---------------------------------- def get_option(self, opt_str): return (self._short_opt.get(opt_str) or self._long_opt.get(opt_str)) def has_option(self, opt_str): return (self._short_opt.has_key(opt_str) or self._long_opt.has_key(opt_str)) def remove_option(self, opt_str): option = self._short_opt.get(opt_str) if option is None: option = self._long_opt.get(opt_str) if option is None: raise ValueError("no such option %r" % opt_str) for opt in option._short_opts: del self._short_opt[opt] for opt in option._long_opts: del self._long_opt[opt] option.container.option_list.remove(option) # -- Help-formatting methods --------------------------------------- def format_option_help(self, formatter): if not self.option_list: return "" result = [] for option in self.option_list: if not option.help is SUPPRESS_HELP: result.append(formatter.format_option(option)) return "".join(result) def format_description(self, formatter): return formatter.format_description(self.get_description()) def format_help(self, formatter): result = [] if self.description: result.append(self.format_description(formatter)) if self.option_list: 
            result.append(self.format_option_help(formatter))
        return "\n".join(result)


class OptionGroup (OptionContainer):
    """A titled group of options that shares the owning parser's
    option mappings (see _share_option_mappings)."""

    def __init__(self, parser, title, description=None):
        self.parser = parser
        OptionContainer.__init__(
            self, parser.option_class, parser.conflict_handler, description)
        self.title = title

    def _create_option_list(self):
        self.option_list = []
        self._share_option_mappings(self.parser)

    def set_title(self, title):
        self.title = title

    # -- Help-formatting methods ---------------------------------------

    def format_help(self, formatter):
        result = formatter.format_heading(self.title)
        formatter.indent()
        result += OptionContainer.format_help(self, formatter)
        formatter.dedent()
        return result


class OptionParser (OptionContainer):

    """
    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      usage : string
        a usage string for your program.  Before it is displayed
        to the user, "%prog" will be expanded to the name of
        your program (self.prog or os.path.basename(sys.argv[0])).
      prog : string
        the name of the current program (to override
        os.path.basename(sys.argv[0])).
      option_groups : [OptionGroup]
        list of option groups in this parser (option groups are
        irrelevant for parsing the command-line, but very useful
        for generating help)

      allow_interspersed_args : bool = true
        if true, positional arguments may be interspersed with options.
        Assuming -a and -b each take a single argument, the command-line
          -ablah foo bar -bboo baz
        will be interpreted the same as
          -ablah -bboo -- foo bar baz
        If this flag were false, that command line would be interpreted as
          -ablah -- foo bar -bboo baz
        -- ie. we stop processing options as soon as we see the first
        non-option argument.  (This is the tradition followed by
        Python's getopt module, Perl's Getopt::Std, and other argument-
        parsing libraries, but it is generally annoying to users.)
      process_default_values : bool = true
        if true, option default values are processed similarly to option
        values from the command line: that is, they are passed to the
        type-checking function for the option's type (as long as the
        default value is a string).  (This really only matters if you
        have defined custom types; see SF bug #955889.)  Set it to false
        to restore the behaviour of Optik 1.4.1 and earlier.

      rargs : [string]
        the argument list currently being parsed.  Only set when
        parse_args() is active, and continually trimmed down as
        we consume arguments.  Mainly there for the benefit of
        callback options.
      largs : [string]
        the list of leftover arguments that we have skipped while
        parsing options.  If allow_interspersed_args is false, this
        list is always empty.
      values : Values
        the set of option values currently being accumulated.  Only
        set when parse_args() is active.  Also mainly for callbacks.

    Because of the 'rargs', 'largs', and 'values' attributes,
    OptionParser is not thread-safe.  If, for some perverse reason, you
    need to parse command-line arguments simultaneously in different
    threads, use different OptionParser instances.

    """

    # Overridden by subclasses that want extra options accepted by
    # every instance of the subclass.
    standard_option_list = []

    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=Option,
                 version=None,
                 conflict_handler="error",
                 description=None,
                 formatter=None,
                 add_help_option=True,
                 prog=None):
        OptionContainer.__init__(
            self, option_class, conflict_handler, description)
        self.set_usage(usage)
        self.prog = prog
        self.version = version
        self.allow_interspersed_args = True
        self.process_default_values = True
        if formatter is None:
            formatter = IndentedHelpFormatter()
        self.formatter = formatter
        self.formatter.set_parser(self)

        # Populate the option list; initial sources are the
        # standard_option_list class attribute, the 'option_list'
        # argument, and (if applicable) the _add_version_option() and
        # _add_help_option() methods.
        self._populate_option_list(option_list,
                                   add_help=add_help_option)

        self._init_parsing_state()

    # -- Private methods -----------------------------------------------
    # (used by our or OptionContainer's constructor)

    def _create_option_list(self):
        self.option_list = []
        self.option_groups = []
        self._create_option_mappings()

    def _add_help_option(self):
        self.add_option("-h", "--help",
                        action="help",
                        help=_("show this help message and exit"))

    def _add_version_option(self):
        self.add_option("--version",
                        action="version",
                        help=_("show program's version number and exit"))

    def _populate_option_list(self, option_list, add_help=True):
        if self.standard_option_list:
            self.add_options(self.standard_option_list)
        if option_list:
            self.add_options(option_list)
        if self.version:
            self._add_version_option()
        if add_help:
            self._add_help_option()

    def _init_parsing_state(self):
        # These are set in parse_args() for the convenience of callbacks.
        self.rargs = None
        self.largs = None
        self.values = None


    # -- Simple modifier methods ---------------------------------------

    def set_usage(self, usage):
        if usage is None:
            self.usage = _("%prog [options]")
        elif usage is SUPPRESS_USAGE:
            self.usage = None
        # For backwards compatibility with Optik 1.3 and earlier.
        # The 7-character "usage: " prefix is stripped here.
        elif usage.startswith("usage:" + " "):
            self.usage = usage[7:]
        else:
            self.usage = usage

    def enable_interspersed_args(self):
        self.allow_interspersed_args = True

    def disable_interspersed_args(self):
        self.allow_interspersed_args = False

    def set_process_default_values(self, process):
        self.process_default_values = process

    def set_default(self, dest, value):
        self.defaults[dest] = value

    def set_defaults(self, **kwargs):
        self.defaults.update(kwargs)

    def _get_all_options(self):
        # Options owned directly plus those owned by option groups.
        options = self.option_list[:]
        for group in self.option_groups:
            options.extend(group.option_list)
        return options

    def get_default_values(self):
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return Values(self.defaults)

        defaults = self.defaults.copy()
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            # Only *string* defaults are run through the option's type
            # checker (see the process_default_values class docstring).
            if isinstance(default, basestring):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)

        return Values(defaults)


    # -- OptionGroup methods -------------------------------------------

    def add_option_group(self, *args, **kwargs):
        # XXX lots of overlap with OptionContainer.add_option()
        if type(args[0]) is types.StringType:
            group = OptionGroup(self, *args, **kwargs)
        elif len(args) == 1 and not kwargs:
            group = args[0]
            if not isinstance(group, OptionGroup):
                raise TypeError, "not an OptionGroup instance: %r" % group
            if group.parser is not self:
                raise ValueError, "invalid OptionGroup (wrong parser)"
        else:
            raise TypeError, "invalid arguments"

        self.option_groups.append(group)
        return group

    def get_option_group(self, opt_str):
        option = (self._short_opt.get(opt_str) or
                  self._long_opt.get(opt_str))
        # An option whose container is not the parser itself lives in
        # a group; return that group.
        if option and option.container is not self:
            return option.container
        return None


    # -- Option-parsing methods ----------------------------------------

    def _get_args(self, args):
        if args is None:
            return sys.argv[1:]
        else:
            return args[:]              # don't modify caller's list

    def parse_args(self, args=None, values=None):
        """
        parse_args(args : [string] = sys.argv[1:],
                   values : Values = None)
        -> (values : Values, args : [string])

        Parse the command-line options found in 'args' (default:
        sys.argv[1:]).  Any errors result in a call to 'error()', which
        by default prints the usage message to stderr and calls
        sys.exit() with an error message.  On success returns a pair
        (values, args) where 'values' is an Values instance (with all
        your option values) and 'args' is the list of arguments left
        over after parsing options.
""" rargs = self._get_args(args) if values is None: values = self.get_default_values() # Store the halves of the argument list as attributes for the # convenience of callbacks: # rargs # the rest of the command-line (the "r" stands for # "remaining" or "right-hand") # largs # the leftover arguments -- ie. what's left after removing # options and their arguments (the "l" stands for "leftover" # or "left-hand") self.rargs = rargs self.largs = largs = [] self.values = values try: stop = self._process_args(largs, rargs, values) except (BadOptionError, OptionValueError), err: self.error(err.msg) args = largs + rargs return self.check_values(values, args) def check_values(self, values, args): """ check_values(values : Values, args : [string]) -> (values : Values, args : [string]) Check that the supplied option values and leftover arguments are valid. Returns the option values and leftover arguments (possibly adjusted, possibly completely new -- whatever you like). Default implementation just returns the passed-in values; subclasses may override as desired. """ return (values, args) def _process_args(self, largs, rargs, values): """_process_args(largs : [string], rargs : [string], values : Values) Process command-line arguments and populate 'values', consuming options and arguments from 'rargs'. If 'allow_interspersed_args' is false, stop at the first non-option argument. If true, accumulate any interspersed non-option arguments in 'largs'. """ while rargs: arg = rargs[0] # We handle bare "--" explicitly, and bare "-" is handled by the # standard arg handler since the short arg case ensures that the # len of the opt string is greater than 1. 
if arg == "--": del rargs[0] return elif arg[0:2] == "--": # process a single long option (possibly with value(s)) self._process_long_opt(rargs, values) elif arg[:1] == "-" and len(arg) > 1: # process a cluster of short options (possibly with # value(s) for the last one only) self._process_short_opts(rargs, values) elif self.allow_interspersed_args: largs.append(arg) del rargs[0] else: return # stop now, leave this arg in rargs # Say this is the original argument list: # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] # ^ # (we are about to process arg(i)). # # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of # [arg0, ..., arg(i-1)] (any options and their arguments will have # been removed from largs). # # The while loop will usually consume 1 or more arguments per pass. # If it consumes 1 (eg. arg is an option that takes no arguments), # then after _process_arg() is done the situation is: # # largs = subset of [arg0, ..., arg(i)] # rargs = [arg(i+1), ..., arg(N-1)] # # If allow_interspersed_args is false, largs will always be # *empty* -- still a subset of [arg0, ..., arg(i-1)], but # not a very interesting subset! def _match_long_opt(self, opt): """_match_long_opt(opt : string) -> string Determine which long option string 'opt' matches, ie. which one it is an unambiguous abbrevation for. Raises BadOptionError if 'opt' doesn't unambiguously match any long option string. """ return _match_abbrev(opt, self._long_opt) def _process_long_opt(self, rargs, values): arg = rargs.pop(0) # Value explicitly attached to arg? Pretend it's the next # argument. 
if "=" in arg: (opt, next_arg) = arg.split("=", 1) rargs.insert(0, next_arg) had_explicit_value = True else: opt = arg had_explicit_value = False opt = self._match_long_opt(opt) option = self._long_opt[opt] if option.takes_value(): nargs = option.nargs if len(rargs) < nargs: if nargs == 1: self.error(_("%s option requires an argument") % opt) else: self.error(_("%s option requires %d arguments") % (opt, nargs)) elif nargs == 1: value = rargs.pop(0) else: value = tuple(rargs[0:nargs]) del rargs[0:nargs] elif had_explicit_value: self.error(_("%s option does not take a value") % opt) else: value = None option.process(opt, value, values, self) def _process_short_opts(self, rargs, values): arg = rargs.pop(0) stop = False i = 1 for ch in arg[1:]: opt = "-" + ch option = self._short_opt.get(opt) i += 1 # we have consumed a character if not option: self.error(_("no such option: %s") % opt) if option.takes_value(): # Any characters left in arg? Pretend they're the # next arg, and stop consuming characters of arg. if i < len(arg): rargs.insert(0, arg[i:]) stop = True nargs = option.nargs if len(rargs) < nargs: if nargs == 1: self.error(_("%s option requires an argument") % opt) else: self.error(_("%s option requires %d arguments") % (opt, nargs)) elif nargs == 1: value = rargs.pop(0) else: value = tuple(rargs[0:nargs]) del rargs[0:nargs] else: # option doesn't take a value value = None option.process(opt, value, values, self) if stop: break # -- Feedback methods ---------------------------------------------- def get_prog_name(self): if self.prog is None: return os.path.basename(sys.argv[0]) else: return self.prog def expand_prog_name(self, s): return s.replace("%prog", self.get_prog_name()) def get_description(self): return self.expand_prog_name(self.description) def exit(self, status=0, msg=None): if msg: sys.stderr.write(msg) sys.exit(status) def error(self, msg): """error(msg : string) Print a usage message incorporating 'msg' to stderr and exit. 
        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(sys.stderr)
        self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))

    def get_usage(self):
        if self.usage:
            return self.formatter.format_usage(
                self.expand_prog_name(self.usage))
        else:
            return ""

    def print_usage(self, file=None):
        """print_usage(file : file = stdout)

        Print the usage message for the current program (self.usage) to
        'file' (default stdout).  Any occurence of the string "%prog" in
        self.usage is replaced with the name of the current program
        (basename of sys.argv[0]).  Does nothing if self.usage is empty
        or not defined.
        """
        if self.usage:
            # file=None falls back to sys.stdout under the Python 2
            # print-to-file syntax.
            print >>file, self.get_usage()

    def get_version(self):
        if self.version:
            return self.expand_prog_name(self.version)
        else:
            return ""

    def print_version(self, file=None):
        """print_version(file : file = stdout)

        Print the version message for this program (self.version) to
        'file' (default stdout).  As with print_usage(), any occurence
        of "%prog" in self.version is replaced by the current program's
        name.  Does nothing if self.version is empty or undefined.
""" if self.version: print >>file, self.get_version() def format_option_help(self, formatter=None): if formatter is None: formatter = self.formatter formatter.store_option_strings(self) result = [] result.append(formatter.format_heading(_("options"))) formatter.indent() if self.option_list: result.append(OptionContainer.format_option_help(self, formatter)) result.append("\n") for group in self.option_groups: result.append(group.format_help(formatter)) result.append("\n") formatter.dedent() # Drop the last "\n", or the header if no options or option groups: return "".join(result[:-1]) def format_help(self, formatter=None): if formatter is None: formatter = self.formatter result = [] if self.usage: result.append(self.get_usage() + "\n") if self.description: result.append(self.format_description(formatter) + "\n") result.append(self.format_option_help(formatter)) return "".join(result) def print_help(self, file=None): """print_help(file : file = stdout) Print an extended help message, listing all options and any help text provided with them, to 'file' (default stdout). """ if file is None: file = sys.stdout file.write(self.format_help()) # class OptionParser def _match_abbrev(s, wordmap): """_match_abbrev(s : string, wordmap : {string : Option}) -> string Return the string key in 'wordmap' for which 's' is an unambiguous abbreviation. If 's' is found to be ambiguous or doesn't match any of 'words', raise BadOptionError. """ # Is there an exact match? if wordmap.has_key(s): return s else: # Isolate all words with s as a prefix. possibilities = [word for word in wordmap.keys() if word.startswith(s)] # No exact match, so there had better be just one possibility. if len(possibilities) == 1: return possibilities[0] elif not possibilities: raise BadOptionError(_("no such option: %s") % s) else: # More than one possible completion: ambiguous prefix. 
            # Sort so the error message lists candidates deterministically.
            possibilities.sort()
            raise BadOptionError(_("ambiguous option: %s (%s?)")
                                 % (s, ", ".join(possibilities)))


# Some day, there might be many Option classes.  As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.  (Today it is simply an alias for the Option class itself.)
make_option = Option
Python
#!/usr/bin/env python # # find and import a version of 'py' # import sys import os from os.path import dirname as opd, exists, join, basename, abspath def searchpy(current): while 1: last = current initpy = join(current, '__init__.py') if not exists(initpy): pydir = join(current, 'py') # recognize py-package and ensure it is importable if exists(pydir) and exists(join(pydir, '__init__.py')): #for p in sys.path: # if p == current: # return True if current != sys.path[0]: # if we are already first, then ok print >>sys.stderr, "inserting into sys.path:", current sys.path.insert(0, current) return True current = opd(current) if last == current: return False if not searchpy(abspath(os.curdir)): if not searchpy(opd(abspath(sys.argv[0]))): if not searchpy(opd(__file__)): pass # let's hope it is just on sys.path import py if __name__ == '__main__': print "py lib is at", py.__file__
Python
#!/usr/bin/env python # # find and import a version of 'py' # import sys import os from os.path import dirname as opd, exists, join, basename, abspath def searchpy(current): while 1: last = current initpy = join(current, '__init__.py') if not exists(initpy): pydir = join(current, 'py') # recognize py-package and ensure it is importable if exists(pydir) and exists(join(pydir, '__init__.py')): #for p in sys.path: # if p == current: # return True if current != sys.path[0]: # if we are already first, then ok print >>sys.stderr, "inserting into sys.path:", current sys.path.insert(0, current) return True current = opd(current) if last == current: return False if not searchpy(abspath(os.curdir)): if not searchpy(opd(abspath(sys.argv[0]))): if not searchpy(opd(__file__)): pass # let's hope it is just on sys.path import py if __name__ == '__main__': print "py lib is at", py.__file__
Python
#!/usr/bin/env python # # Test suite for Optik. Supplied by Johannes Gijsbers # (taradino@softhome.net) -- translated from the original Optik # test suite to this PyUnit-based version. # # $Id: test_optparse.py 46506 2006-05-28 18:15:43Z armin.rigo $ # import sys import os import copy import unittest from cStringIO import StringIO from pprint import pprint from test import test_support import py optparse = py.compat.optparse import sys sys.modules['optparse'] = optparse from optparse import make_option, Option, IndentedHelpFormatter, \ TitledHelpFormatter, OptionParser, OptionContainer, OptionGroup, \ SUPPRESS_HELP, SUPPRESS_USAGE, OptionError, OptionConflictError, \ BadOptionError, OptionValueError, Values, _match_abbrev # Do the right thing with boolean values for all known Python versions. try: True, False except NameError: (True, False) = (1, 0) class InterceptedError(Exception): def __init__(self, error_message=None, exit_status=None, exit_message=None): self.error_message = error_message self.exit_status = exit_status self.exit_message = exit_message def __str__(self): return self.error_message or self.exit_message or "intercepted error" class InterceptingOptionParser(OptionParser): def exit(self, status=0, msg=None): raise InterceptedError(exit_status=status, exit_message=msg) def error(self, msg): raise InterceptedError(error_message=msg) class BaseTest(unittest.TestCase): def assertParseOK(self, args, expected_opts, expected_positional_args): """Assert the options are what we expected when parsing arguments. Otherwise, fail with a nicely formatted message. Keyword arguments: args -- A list of arguments to parse with OptionParser. expected_opts -- The options expected. expected_positional_args -- The positional arguments expected. Returns the options and positional args for further testing. """ (options, positional_args) = self.parser.parse_args(args) optdict = vars(options) self.assertEqual(optdict, expected_opts, """ Options are %(optdict)s. 
Should be %(expected_opts)s. Args were %(args)s.""" % locals()) self.assertEqual(positional_args, expected_positional_args, """ Positional arguments are %(positional_args)s. Should be %(expected_positional_args)s. Args were %(args)s.""" % locals ()) return (options, positional_args) def assertRaises(self, func, args, kwargs, expected_exception, expected_message): """ Assert that the expected exception is raised when calling a function, and that the right error message is included with that exception. Arguments: func -- the function to call args -- positional arguments to `func` kwargs -- keyword arguments to `func` expected_exception -- exception that should be raised expected_output -- output we expect to see Returns the exception raised for further testing. """ if args is None: args = () if kwargs is None: kwargs = {} try: func(*args, **kwargs) except expected_exception, err: actual_message = str(err) self.assertEqual(actual_message, expected_message, """\ expected exception message: '''%(expected_message)s''' actual exception message: '''%(actual_message)s''' """ % locals()) return err else: self.fail("""expected exception %(expected_exception)s not raised called %(func)r with args %(args)r and kwargs %(kwargs)r """ % locals ()) # -- Assertions used in more than one class -------------------- def assertParseFail(self, cmdline_args, expected_output): """ Assert the parser fails with the expected message. Caller must ensure that self.parser is an InterceptingOptionParser. 
""" try: self.parser.parse_args(cmdline_args) except InterceptedError, err: self.assertEqual(err.error_message, expected_output) else: self.assertFalse("expected parse failure") def assertOutput(self, cmdline_args, expected_output, expected_status=0, expected_error=None): """Assert the parser prints the expected output on stdout.""" save_stdout = sys.stdout try: try: sys.stdout = StringIO() self.parser.parse_args(cmdline_args) finally: output = sys.stdout.getvalue() sys.stdout = save_stdout except InterceptedError, err: self.assertEqual(output, expected_output) self.assertEqual(err.exit_status, expected_status) self.assertEqual(err.exit_message, expected_error) else: self.assertFalse("expected parser.exit()") def assertTypeError(self, func, expected_message, *args): """Assert that TypeError is raised when executing func.""" self.assertRaises(func, args, None, TypeError, expected_message) def assertHelp(self, parser, expected_help): actual_help = parser.format_help() if actual_help != expected_help: raise self.failureException( 'help text failure; expected:\n"' + expected_help + '"; got:\n"' + actual_help + '"\n') # -- Test make_option() aka Option ------------------------------------- # It's not necessary to test correct options here. All the tests in the # parser.parse_args() section deal with those, because they're needed # there. 
class TestOptionChecks(BaseTest): def setUp(self): self.parser = OptionParser(usage=SUPPRESS_USAGE) def assertOptionError(self, expected_message, args=[], kwargs={}): self.assertRaises(make_option, args, kwargs, OptionError, expected_message) def test_opt_string_empty(self): self.assertTypeError(make_option, "at least one option string must be supplied") def test_opt_string_too_short(self): self.assertOptionError( "invalid option string 'b': must be at least two characters long", ["b"]) def test_opt_string_short_invalid(self): self.assertOptionError( "invalid short option string '--': must be " "of the form -x, (x any non-dash char)", ["--"]) def test_opt_string_long_invalid(self): self.assertOptionError( "invalid long option string '---': " "must start with --, followed by non-dash", ["---"]) def test_attr_invalid(self): self.assertOptionError( "option -b: invalid keyword arguments: bar, foo", ["-b"], {'foo': None, 'bar': None}) def test_action_invalid(self): self.assertOptionError( "option -b: invalid action: 'foo'", ["-b"], {'action': 'foo'}) def test_type_invalid(self): self.assertOptionError( "option -b: invalid option type: 'foo'", ["-b"], {'type': 'foo'}) self.assertOptionError( "option -b: invalid option type: 'tuple'", ["-b"], {'type': tuple}) def test_no_type_for_action(self): self.assertOptionError( "option -b: must not supply a type for action 'count'", ["-b"], {'action': 'count', 'type': 'int'}) def test_no_choices_list(self): self.assertOptionError( "option -b/--bad: must supply a list of " "choices for type 'choice'", ["-b", "--bad"], {'type': "choice"}) def test_bad_choices_list(self): typename = type('').__name__ self.assertOptionError( "option -b/--bad: choices must be a list of " "strings ('%s' supplied)" % typename, ["-b", "--bad"], {'type': "choice", 'choices':"bad choices"}) def test_no_choices_for_type(self): self.assertOptionError( "option -b: must not supply choices for type 'int'", ["-b"], {'type': 'int', 'choices':"bad"}) def 
test_no_const_for_action(self): self.assertOptionError( "option -b: 'const' must not be supplied for action 'store'", ["-b"], {'action': 'store', 'const': 1}) def test_no_nargs_for_action(self): self.assertOptionError( "option -b: 'nargs' must not be supplied for action 'count'", ["-b"], {'action': 'count', 'nargs': 2}) def test_callback_not_callable(self): self.assertOptionError( "option -b: callback not callable: 'foo'", ["-b"], {'action': 'callback', 'callback': 'foo'}) def dummy(self): pass def test_callback_args_no_tuple(self): self.assertOptionError( "option -b: callback_args, if supplied, " "must be a tuple: not 'foo'", ["-b"], {'action': 'callback', 'callback': self.dummy, 'callback_args': 'foo'}) def test_callback_kwargs_no_dict(self): self.assertOptionError( "option -b: callback_kwargs, if supplied, " "must be a dict: not 'foo'", ["-b"], {'action': 'callback', 'callback': self.dummy, 'callback_kwargs': 'foo'}) def test_no_callback_for_action(self): self.assertOptionError( "option -b: callback supplied ('foo') for non-callback option", ["-b"], {'action': 'store', 'callback': 'foo'}) def test_no_callback_args_for_action(self): self.assertOptionError( "option -b: callback_args supplied for non-callback option", ["-b"], {'action': 'store', 'callback_args': 'foo'}) def test_no_callback_kwargs_for_action(self): self.assertOptionError( "option -b: callback_kwargs supplied for non-callback option", ["-b"], {'action': 'store', 'callback_kwargs': 'foo'}) class TestOptionParser(BaseTest): def setUp(self): self.parser = OptionParser() self.parser.add_option("-v", "--verbose", "-n", "--noisy", action="store_true", dest="verbose") self.parser.add_option("-q", "--quiet", "--silent", action="store_false", dest="verbose") def test_add_option_no_Option(self): self.assertTypeError(self.parser.add_option, "not an Option instance: None", None) def test_add_option_invalid_arguments(self): self.assertTypeError(self.parser.add_option, "invalid arguments", None, None) def 
test_get_option(self): opt1 = self.parser.get_option("-v") self.assert_(isinstance(opt1, Option)) self.assertEqual(opt1._short_opts, ["-v", "-n"]) self.assertEqual(opt1._long_opts, ["--verbose", "--noisy"]) self.assertEqual(opt1.action, "store_true") self.assertEqual(opt1.dest, "verbose") def test_get_option_equals(self): opt1 = self.parser.get_option("-v") opt2 = self.parser.get_option("--verbose") opt3 = self.parser.get_option("-n") opt4 = self.parser.get_option("--noisy") self.assert_(opt1 is opt2 is opt3 is opt4) def test_has_option(self): self.assert_(self.parser.has_option("-v")) self.assert_(self.parser.has_option("--verbose")) def assert_removed(self): self.assert_(self.parser.get_option("-v") is None) self.assert_(self.parser.get_option("--verbose") is None) self.assert_(self.parser.get_option("-n") is None) self.assert_(self.parser.get_option("--noisy") is None) self.failIf(self.parser.has_option("-v")) self.failIf(self.parser.has_option("--verbose")) self.failIf(self.parser.has_option("-n")) self.failIf(self.parser.has_option("--noisy")) self.assert_(self.parser.has_option("-q")) self.assert_(self.parser.has_option("--silent")) def test_remove_short_opt(self): self.parser.remove_option("-n") self.assert_removed() def test_remove_long_opt(self): self.parser.remove_option("--verbose") self.assert_removed() def test_remove_nonexistent(self): self.assertRaises(self.parser.remove_option, ('foo',), None, ValueError, "no such option 'foo'") class TestOptionValues(BaseTest): def setUp(self): pass def test_basics(self): values = Values() self.assertEqual(vars(values), {}) self.assertEqual(values, {}) self.assertNotEqual(values, {"foo": "bar"}) self.assertNotEqual(values, "") dict = {"foo": "bar", "baz": 42} values = Values(defaults=dict) self.assertEqual(vars(values), dict) self.assertEqual(values, dict) self.assertNotEqual(values, {"foo": "bar"}) self.assertNotEqual(values, {}) self.assertNotEqual(values, "") self.assertNotEqual(values, []) class 
# NOTE(review): the `class` keyword for TestTypeAliases was split onto the
# previous mangled line by the extraction; restored here.
class TestTypeAliases(BaseTest):
    def setUp(self):
        self.parser = OptionParser()

    def test_type_aliases(self):
        # Python type objects and the string "str" are accepted aliases
        # for optparse's canonical "int"/"string" type names.
        self.parser.add_option("-x", type=int)
        self.parser.add_option("-s", type=str)
        self.parser.add_option("-t", type="str")
        self.assertEquals(self.parser.get_option("-x").type, "int")
        self.assertEquals(self.parser.get_option("-s").type, "string")
        self.assertEquals(self.parser.get_option("-t").type, "string")


# Custom type for testing processing of default values.
_time_units = { 's' : 1, 'm' : 60, 'h' : 60*60, 'd' : 60*60*24 }

def _check_duration(option, opt, value):
    """TYPE_CHECKER for the 'duration' type: a plain integer means
    seconds, an integer with an s/m/h/d suffix is scaled accordingly.
    Raises OptionValueError for anything else (including '')."""
    try:
        if value[-1].isdigit():
            return int(value)
        else:
            return int(value[:-1]) * _time_units[value[-1]]
    # FIX: was `except ValueError, IndexError:` which in Python 2 catches
    # only ValueError and BINDS it to the name IndexError, so an actual
    # IndexError (empty value) escaped uncaught.  The tuple form catches
    # both.  (KeyError from an unknown unit suffix still propagates, as
    # in the original.)
    except (ValueError, IndexError):
        raise OptionValueError(
            'option %s: invalid duration: %r' % (opt, value))

class DurationOption(Option):
    # extend the stock Option with the custom 'duration' type
    TYPES = Option.TYPES + ('duration',)
    TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
    TYPE_CHECKER['duration'] = _check_duration

class TestDefaultValues(BaseTest):
    def setUp(self):
        self.parser = OptionParser()
        self.parser.add_option("-v", "--verbose", default=True)
        self.parser.add_option("-q", "--quiet", dest='verbose')
        self.parser.add_option("-n", type="int", default=37)
        self.parser.add_option("-m", type="int")
        self.parser.add_option("-s", default="foo")
        self.parser.add_option("-t")
        self.parser.add_option("-u", default=None)
        self.expected = { 'verbose': True,
                          'n': 37, 'm': None, 's': "foo",
                          't': None, 'u': None }

    def test_basic_defaults(self):
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_mixed_defaults_post(self):
        self.parser.set_defaults(n=42, m=-100)
        self.expected.update({'n': 42, 'm': -100})
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_mixed_defaults_pre(self):
        self.parser.set_defaults(x="barf", y="blah")
        self.parser.add_option("-x", default="frob")
        self.parser.add_option("-y")
        self.expected.update({'x': "frob", 'y': "blah"})
        self.assertEqual(self.parser.get_default_values(), self.expected)
self.parser.remove_option("-y") self.parser.add_option("-y", default=None) self.expected.update({'y': None}) self.assertEqual(self.parser.get_default_values(), self.expected) def test_process_default(self): self.parser.option_class = DurationOption self.parser.add_option("-d", type="duration", default=300) self.parser.add_option("-e", type="duration", default="6m") self.parser.set_defaults(n="42") self.expected.update({'d': 300, 'e': 360, 'n': 42}) self.assertEqual(self.parser.get_default_values(), self.expected) self.parser.set_process_default_values(False) self.expected.update({'d': 300, 'e': "6m", 'n': "42"}) self.assertEqual(self.parser.get_default_values(), self.expected) class TestProgName(BaseTest): """ Test that %prog expands to the right thing in usage, version, and help strings. """ def assertUsage(self, parser, expected_usage): self.assertEqual(parser.get_usage(), expected_usage) def assertVersion(self, parser, expected_version): self.assertEqual(parser.get_version(), expected_version) def test_default_progname(self): # Make sure that program name taken from sys.argv[0] by default. 
save_argv = sys.argv[:] try: sys.argv[0] = os.path.join("foo", "bar", "baz.py") parser = OptionParser("usage: %prog ...", version="%prog 1.2") expected_usage = "usage: baz.py ...\n" self.assertUsage(parser, expected_usage) self.assertVersion(parser, "baz.py 1.2") self.assertHelp(parser, expected_usage + "\n" + "options:\n" " --version show program's version number and exit\n" " -h, --help show this help message and exit\n") finally: sys.argv[:] = save_argv def test_custom_progname(self): parser = OptionParser(prog="thingy", version="%prog 0.1", usage="%prog arg arg") parser.remove_option("-h") parser.remove_option("--version") expected_usage = "usage: thingy arg arg\n" self.assertUsage(parser, expected_usage) self.assertVersion(parser, "thingy 0.1") self.assertHelp(parser, expected_usage + "\n") class TestExpandDefaults(BaseTest): def setUp(self): self.parser = OptionParser(prog="test") self.help_prefix = """\ usage: test [options] options: -h, --help show this help message and exit """ self.file_help = "read from FILE [default: %default]" self.expected_help_file = self.help_prefix + \ " -f FILE, --file=FILE read from FILE [default: foo.txt]\n" self.expected_help_none = self.help_prefix + \ " -f FILE, --file=FILE read from FILE [default: none]\n" def test_option_default(self): self.parser.add_option("-f", "--file", default="foo.txt", help=self.file_help) self.assertHelp(self.parser, self.expected_help_file) def test_parser_default_1(self): self.parser.add_option("-f", "--file", help=self.file_help) self.parser.set_default('file', "foo.txt") self.assertHelp(self.parser, self.expected_help_file) def test_parser_default_2(self): self.parser.add_option("-f", "--file", help=self.file_help) self.parser.set_defaults(file="foo.txt") self.assertHelp(self.parser, self.expected_help_file) def test_no_default(self): self.parser.add_option("-f", "--file", help=self.file_help) self.assertHelp(self.parser, self.expected_help_none) def test_default_none_1(self): 
self.parser.add_option("-f", "--file", default=None, help=self.file_help) self.assertHelp(self.parser, self.expected_help_none) def test_default_none_2(self): self.parser.add_option("-f", "--file", help=self.file_help) self.parser.set_defaults(file=None) self.assertHelp(self.parser, self.expected_help_none) def test_float_default(self): self.parser.add_option( "-p", "--prob", help="blow up with probability PROB [default: %default]") self.parser.set_defaults(prob=0.43) expected_help = self.help_prefix + \ " -p PROB, --prob=PROB blow up with probability PROB [default: 0.43]\n" self.assertHelp(self.parser, expected_help) def test_alt_expand(self): self.parser.add_option("-f", "--file", default="foo.txt", help="read from FILE [default: *DEFAULT*]") self.parser.formatter.default_tag = "*DEFAULT*" self.assertHelp(self.parser, self.expected_help_file) def test_no_expand(self): self.parser.add_option("-f", "--file", default="foo.txt", help="read from %default file") self.parser.formatter.default_tag = None expected_help = self.help_prefix + \ " -f FILE, --file=FILE read from %default file\n" self.assertHelp(self.parser, expected_help) # -- Test parser.parse_args() ------------------------------------------ class TestStandard(BaseTest): def setUp(self): options = [make_option("-a", type="string"), make_option("-b", "--boo", type="int", dest='boo'), make_option("--foo", action="append")] self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_list=options) def test_required_value(self): self.assertParseFail(["-a"], "-a option requires an argument") def test_invalid_integer(self): self.assertParseFail(["-b", "5x"], "option -b: invalid integer value: '5x'") def test_no_such_option(self): self.assertParseFail(["--boo13"], "no such option: --boo13") def test_long_invalid_integer(self): self.assertParseFail(["--boo=x5"], "option --boo: invalid integer value: 'x5'") def test_empty(self): self.assertParseOK([], {'a': None, 'boo': None, 'foo': None}, []) def 
test_shortopt_empty_longopt_append(self): self.assertParseOK(["-a", "", "--foo=blah", "--foo="], {'a': "", 'boo': None, 'foo': ["blah", ""]}, []) def test_long_option_append(self): self.assertParseOK(["--foo", "bar", "--foo", "", "--foo=x"], {'a': None, 'boo': None, 'foo': ["bar", "", "x"]}, []) def test_option_argument_joined(self): self.assertParseOK(["-abc"], {'a': "bc", 'boo': None, 'foo': None}, []) def test_option_argument_split(self): self.assertParseOK(["-a", "34"], {'a': "34", 'boo': None, 'foo': None}, []) def test_option_argument_joined_integer(self): self.assertParseOK(["-b34"], {'a': None, 'boo': 34, 'foo': None}, []) def test_option_argument_split_negative_integer(self): self.assertParseOK(["-b", "-5"], {'a': None, 'boo': -5, 'foo': None}, []) def test_long_option_argument_joined(self): self.assertParseOK(["--boo=13"], {'a': None, 'boo': 13, 'foo': None}, []) def test_long_option_argument_split(self): self.assertParseOK(["--boo", "111"], {'a': None, 'boo': 111, 'foo': None}, []) def test_long_option_short_option(self): self.assertParseOK(["--foo=bar", "-axyz"], {'a': 'xyz', 'boo': None, 'foo': ["bar"]}, []) def test_abbrev_long_option(self): self.assertParseOK(["--f=bar", "-axyz"], {'a': 'xyz', 'boo': None, 'foo': ["bar"]}, []) def test_defaults(self): (options, args) = self.parser.parse_args([]) defaults = self.parser.get_default_values() self.assertEqual(vars(defaults), vars(options)) def test_ambiguous_option(self): self.parser.add_option("--foz", action="store", type="string", dest="foo") self.assertParseFail(["--f=bar"], "ambiguous option: --f (--foo, --foz?)") def test_short_and_long_option_split(self): self.assertParseOK(["-a", "xyz", "--foo", "bar"], {'a': 'xyz', 'boo': None, 'foo': ["bar"]}, []), def test_short_option_split_long_option_append(self): self.assertParseOK(["--foo=bar", "-b", "123", "--foo", "baz"], {'a': None, 'boo': 123, 'foo': ["bar", "baz"]}, []) def test_short_option_split_one_positional_arg(self): self.assertParseOK(["-a", 
"foo", "bar"], {'a': "foo", 'boo': None, 'foo': None}, ["bar"]), def test_short_option_consumes_separator(self): self.assertParseOK(["-a", "--", "foo", "bar"], {'a': "--", 'boo': None, 'foo': None}, ["foo", "bar"]), def test_short_option_joined_and_separator(self): self.assertParseOK(["-ab", "--", "--foo", "bar"], {'a': "b", 'boo': None, 'foo': None}, ["--foo", "bar"]), def test_invalid_option_becomes_positional_arg(self): self.assertParseOK(["-ab", "-", "--foo", "bar"], {'a': "b", 'boo': None, 'foo': ["bar"]}, ["-"]) def test_no_append_versus_append(self): self.assertParseOK(["-b3", "-b", "5", "--foo=bar", "--foo", "baz"], {'a': None, 'boo': 5, 'foo': ["bar", "baz"]}, []) def test_option_consumes_optionlike_string(self): self.assertParseOK(["-a", "-b3"], {'a': "-b3", 'boo': None, 'foo': None}, []) class TestBool(BaseTest): def setUp(self): options = [make_option("-v", "--verbose", action="store_true", dest="verbose", default=''), make_option("-q", "--quiet", action="store_false", dest="verbose")] self.parser = OptionParser(option_list = options) def test_bool_default(self): self.assertParseOK([], {'verbose': ''}, []) def test_bool_false(self): (options, args) = self.assertParseOK(["-q"], {'verbose': 0}, []) if hasattr(__builtins__, 'False'): self.failUnless(options.verbose is False) def test_bool_true(self): (options, args) = self.assertParseOK(["-v"], {'verbose': 1}, []) if hasattr(__builtins__, 'True'): self.failUnless(options.verbose is True) def test_bool_flicker_on_and_off(self): self.assertParseOK(["-qvq", "-q", "-v"], {'verbose': 1}, []) class TestChoice(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.add_option("-c", action="store", type="choice", dest="choice", choices=["one", "two", "three"]) def test_valid_choice(self): self.assertParseOK(["-c", "one", "xyz"], {'choice': 'one'}, ["xyz"]) def test_invalid_choice(self): self.assertParseFail(["-c", "four", "abc"], "option -c: invalid choice: 'four' " 
"(choose from 'one', 'two', 'three')") def test_add_choice_option(self): self.parser.add_option("-d", "--default", choices=["four", "five", "six"]) opt = self.parser.get_option("-d") self.assertEqual(opt.type, "choice") self.assertEqual(opt.action, "store") class TestCount(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.v_opt = make_option("-v", action="count", dest="verbose") self.parser.add_option(self.v_opt) self.parser.add_option("--verbose", type="int", dest="verbose") self.parser.add_option("-q", "--quiet", action="store_const", dest="verbose", const=0) def test_empty(self): self.assertParseOK([], {'verbose': None}, []) def test_count_one(self): self.assertParseOK(["-v"], {'verbose': 1}, []) def test_count_three(self): self.assertParseOK(["-vvv"], {'verbose': 3}, []) def test_count_three_apart(self): self.assertParseOK(["-v", "-v", "-v"], {'verbose': 3}, []) def test_count_override_amount(self): self.assertParseOK(["-vvv", "--verbose=2"], {'verbose': 2}, []) def test_count_override_quiet(self): self.assertParseOK(["-vvv", "--verbose=2", "-q"], {'verbose': 0}, []) def test_count_overriding(self): self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"], {'verbose': 1}, []) def test_count_interspersed_args(self): self.assertParseOK(["--quiet", "3", "-v"], {'verbose': 1}, ["3"]) def test_count_no_interspersed_args(self): self.parser.disable_interspersed_args() self.assertParseOK(["--quiet", "3", "-v"], {'verbose': 0}, ["3", "-v"]) def test_count_no_such_option(self): self.assertParseFail(["-q3", "-v"], "no such option: -3") def test_count_option_no_value(self): self.assertParseFail(["--quiet=3", "-v"], "--quiet option does not take a value") def test_count_with_default(self): self.parser.set_default('verbose', 0) self.assertParseOK([], {'verbose':0}, []) def test_count_overriding_default(self): self.parser.set_default('verbose', 0) self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"], {'verbose': 1}, []) class 
TestMultipleArgs(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.add_option("-p", "--point", action="store", nargs=3, type="float", dest="point") def test_nargs_with_positional_args(self): self.assertParseOK(["foo", "-p", "1", "2.5", "-4.3", "xyz"], {'point': (1.0, 2.5, -4.3)}, ["foo", "xyz"]) def test_nargs_long_opt(self): self.assertParseOK(["--point", "-1", "2.5", "-0", "xyz"], {'point': (-1.0, 2.5, -0.0)}, ["xyz"]) def test_nargs_invalid_float_value(self): self.assertParseFail(["-p", "1.0", "2x", "3.5"], "option -p: " "invalid floating-point value: '2x'") def test_nargs_required_values(self): self.assertParseFail(["--point", "1.0", "3.5"], "--point option requires 3 arguments") class TestMultipleArgsAppend(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.add_option("-p", "--point", action="store", nargs=3, type="float", dest="point") self.parser.add_option("-f", "--foo", action="append", nargs=2, type="int", dest="foo") def test_nargs_append(self): self.assertParseOK(["-f", "4", "-3", "blah", "--foo", "1", "666"], {'point': None, 'foo': [(4, -3), (1, 666)]}, ["blah"]) def test_nargs_append_required_values(self): self.assertParseFail(["-f4,3"], "-f option requires 2 arguments") def test_nargs_append_simple(self): self.assertParseOK(["--foo=3", "4"], {'point': None, 'foo':[(3, 4)]}, []) class TestVersion(BaseTest): def test_version(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, version="%prog 0.1") save_argv = sys.argv[:] try: sys.argv[0] = os.path.join(os.curdir, "foo", "bar") self.assertOutput(["--version"], "bar 0.1\n") finally: sys.argv[:] = save_argv def test_no_version(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.assertParseFail(["--version"], "no such option: --version") # -- Test conflicting default values and parser.parse_args() ----------- class TestConflictingDefaults(BaseTest): """Conflicting default 
values: the last one should win.""" def setUp(self): self.parser = OptionParser(option_list=[ make_option("-v", action="store_true", dest="verbose", default=1)]) def test_conflict_default(self): self.parser.add_option("-q", action="store_false", dest="verbose", default=0) self.assertParseOK([], {'verbose': 0}, []) def test_conflict_default_none(self): self.parser.add_option("-q", action="store_false", dest="verbose", default=None) self.assertParseOK([], {'verbose': None}, []) class TestOptionGroup(BaseTest): def setUp(self): self.parser = OptionParser(usage=SUPPRESS_USAGE) def test_option_group_create_instance(self): group = OptionGroup(self.parser, "Spam") self.parser.add_option_group(group) group.add_option("--spam", action="store_true", help="spam spam spam spam") self.assertParseOK(["--spam"], {'spam': 1}, []) def test_add_group_no_group(self): self.assertTypeError(self.parser.add_option_group, "not an OptionGroup instance: None", None) def test_add_group_invalid_arguments(self): self.assertTypeError(self.parser.add_option_group, "invalid arguments", None, None) def test_add_group_wrong_parser(self): group = OptionGroup(self.parser, "Spam") group.parser = OptionParser() self.assertRaises(self.parser.add_option_group, (group,), None, ValueError, "invalid OptionGroup (wrong parser)") def test_group_manipulate(self): group = self.parser.add_option_group("Group 2", description="Some more options") group.set_title("Bacon") group.add_option("--bacon", type="int") self.assert_(self.parser.get_option_group("--bacon"), group) # -- Test extending and parser.parse_args() ---------------------------- class TestExtendAddTypes(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_class=self.MyOption) self.parser.add_option("-a", None, type="string", dest="a") self.parser.add_option("-f", "--file", type="file", dest="file") class MyOption (Option): def check_file (option, opt, value): if not os.path.exists(value): raise 
OptionValueError("%s: file does not exist" % value) elif not os.path.isfile(value): raise OptionValueError("%s: not a regular file" % value) return value TYPES = Option.TYPES + ("file",) TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER) TYPE_CHECKER["file"] = check_file def test_extend_file(self): open(test_support.TESTFN, "w").close() self.assertParseOK(["--file", test_support.TESTFN, "-afoo"], {'file': test_support.TESTFN, 'a': 'foo'}, []) os.unlink(test_support.TESTFN) def test_extend_file_nonexistent(self): self.assertParseFail(["--file", test_support.TESTFN, "-afoo"], "%s: file does not exist" % test_support.TESTFN) def test_file_irregular(self): os.mkdir(test_support.TESTFN) self.assertParseFail(["--file", test_support.TESTFN, "-afoo"], "%s: not a regular file" % test_support.TESTFN) os.rmdir(test_support.TESTFN) class TestExtendAddActions(BaseTest): def setUp(self): options = [self.MyOption("-a", "--apple", action="extend", type="string", dest="apple")] self.parser = OptionParser(option_list=options) class MyOption (Option): ACTIONS = Option.ACTIONS + ("extend",) STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) def take_action (self, action, dest, opt, value, values, parser): if action == "extend": lvalue = value.split(",") values.ensure_value(dest, []).extend(lvalue) else: Option.take_action(self, action, dest, opt, parser, value, values) def test_extend_add_action(self): self.assertParseOK(["-afoo,bar", "--apple=blah"], {'apple': ["foo", "bar", "blah"]}, []) def test_extend_add_action_normal(self): self.assertParseOK(["-a", "foo", "-abar", "--apple=x,y"], {'apple': ["foo", "bar", "x", "y"]}, []) # -- Test callbacks and parser.parse_args() ---------------------------- class TestCallback(BaseTest): def setUp(self): options = [make_option("-x", None, action="callback", callback=self.process_opt), make_option("-f", "--file", action="callback", callback=self.process_opt, type="string", dest="filename")] 
self.parser = OptionParser(option_list=options) def process_opt(self, option, opt, value, parser_): if opt == "-x": self.assertEqual(option._short_opts, ["-x"]) self.assertEqual(option._long_opts, []) self.assert_(parser_ is self.parser) self.assert_(value is None) self.assertEqual(vars(parser_.values), {'filename': None}) parser_.values.x = 42 elif opt == "--file": self.assertEqual(option._short_opts, ["-f"]) self.assertEqual(option._long_opts, ["--file"]) self.assert_(parser_ is self.parser) self.assertEqual(value, "foo") self.assertEqual(vars(parser_.values), {'filename': None, 'x': 42}) setattr(parser_.values, option.dest, value) else: self.fail("Unknown option %r in process_opt." % opt) def test_callback(self): self.assertParseOK(["-x", "--file=foo"], {'filename': "foo", 'x': 42}, []) def test_callback_help(self): # This test was prompted by SF bug #960515 -- the point is # not to inspect the help text, just to make sure that # format_help() doesn't crash. parser = OptionParser(usage=SUPPRESS_USAGE) parser.remove_option("-h") parser.add_option("-t", "--test", action="callback", callback=lambda: None, type="string", help="foo") expected_help = ("options:\n" " -t TEST, --test=TEST foo\n") self.assertHelp(parser, expected_help) class TestCallbackExtraArgs(BaseTest): def setUp(self): options = [make_option("-p", "--point", action="callback", callback=self.process_tuple, callback_args=(3, int), type="string", dest="points", default=[])] self.parser = OptionParser(option_list=options) def process_tuple (self, option, opt, value, parser_, len, type): self.assertEqual(len, 3) self.assert_(type is int) if opt == "-p": self.assertEqual(value, "1,2,3") elif opt == "--point": self.assertEqual(value, "4,5,6") value = tuple(map(type, value.split(","))) getattr(parser_.values, option.dest).append(value) def test_callback_extra_args(self): self.assertParseOK(["-p1,2,3", "--point", "4,5,6"], {'points': [(1,2,3), (4,5,6)]}, []) class TestCallbackMeddleArgs(BaseTest): def 
setUp(self): options = [make_option(str(x), action="callback", callback=self.process_n, dest='things') for x in range(-1, -6, -1)] self.parser = OptionParser(option_list=options) # Callback that meddles in rargs, largs def process_n (self, option, opt, value, parser_): # option is -3, -5, etc. nargs = int(opt[1:]) rargs = parser_.rargs if len(rargs) < nargs: self.fail("Expected %d arguments for %s option." % (nargs, opt)) dest = parser_.values.ensure_value(option.dest, []) dest.append(tuple(rargs[0:nargs])) parser_.largs.append(nargs) del rargs[0:nargs] def test_callback_meddle_args(self): self.assertParseOK(["-1", "foo", "-3", "bar", "baz", "qux"], {'things': [("foo",), ("bar", "baz", "qux")]}, [1, 3]) def test_callback_meddle_args_separator(self): self.assertParseOK(["-2", "foo", "--"], {'things': [('foo', '--')]}, [2]) class TestCallbackManyArgs(BaseTest): def setUp(self): options = [make_option("-a", "--apple", action="callback", nargs=2, callback=self.process_many, type="string"), make_option("-b", "--bob", action="callback", nargs=3, callback=self.process_many, type="int")] self.parser = OptionParser(option_list=options) def process_many (self, option, opt, value, parser_): if opt == "-a": self.assertEqual(value, ("foo", "bar")) elif opt == "--apple": self.assertEqual(value, ("ding", "dong")) elif opt == "-b": self.assertEqual(value, (1, 2, 3)) elif opt == "--bob": self.assertEqual(value, (-666, 42, 0)) def test_many_args(self): self.assertParseOK(["-a", "foo", "bar", "--apple", "ding", "dong", "-b", "1", "2", "3", "--bob", "-666", "42", "0"], {"apple": None, "bob": None}, []) class TestCallbackCheckAbbrev(BaseTest): def setUp(self): self.parser = OptionParser() self.parser.add_option("--foo-bar", action="callback", callback=self.check_abbrev) def check_abbrev (self, option, opt, value, parser): self.assertEqual(opt, "--foo-bar") def test_abbrev_callback_expansion(self): self.assertParseOK(["--foo"], {}, []) class TestCallbackVarArgs(BaseTest): def 
setUp(self): options = [make_option("-a", type="int", nargs=2, dest="a"), make_option("-b", action="store_true", dest="b"), make_option("-c", "--callback", action="callback", callback=self.variable_args, dest="c")] self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_list=options) def variable_args (self, option, opt, value, parser): self.assert_(value is None) done = 0 value = [] rargs = parser.rargs while rargs: arg = rargs[0] if ((arg[:2] == "--" and len(arg) > 2) or (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")): break else: value.append(arg) del rargs[0] setattr(parser.values, option.dest, value) def test_variable_args(self): self.assertParseOK(["-a3", "-5", "--callback", "foo", "bar"], {'a': (3, -5), 'b': None, 'c': ["foo", "bar"]}, []) def test_consume_separator_stop_at_option(self): self.assertParseOK(["-c", "37", "--", "xxx", "-b", "hello"], {'a': None, 'b': True, 'c': ["37", "--", "xxx"]}, ["hello"]) def test_positional_arg_and_variable_args(self): self.assertParseOK(["hello", "-c", "foo", "-", "bar"], {'a': None, 'b': None, 'c':["foo", "-", "bar"]}, ["hello"]) def test_stop_at_option(self): self.assertParseOK(["-c", "foo", "-b"], {'a': None, 'b': True, 'c': ["foo"]}, []) def test_stop_at_invalid_option(self): self.assertParseFail(["-c", "3", "-5", "-a"], "no such option: -5") # -- Test conflict handling and parser.parse_args() -------------------- class ConflictBase(BaseTest): def setUp(self): options = [make_option("-v", "--verbose", action="count", dest="verbose", help="increment verbosity")] self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_list=options) def show_version (self, option, opt, value, parser): parser.values.show_version = 1 class TestConflict(ConflictBase): """Use the default conflict resolution for Optik 1.2: error.""" def assert_conflict_error(self, func): err = self.assertRaises( func, ("-v", "--version"), {'action' : "callback", 'callback' : self.show_version, 'help' : "show version"}, 
OptionConflictError, "option -v/--version: conflicting option string(s): -v") self.assertEqual(err.msg, "conflicting option string(s): -v") self.assertEqual(err.option_id, "-v/--version") def test_conflict_error(self): self.assert_conflict_error(self.parser.add_option) def test_conflict_error_group(self): group = OptionGroup(self.parser, "Group 1") self.assert_conflict_error(group.add_option) def test_no_such_conflict_handler(self): self.assertRaises( self.parser.set_conflict_handler, ('foo',), None, ValueError, "invalid conflict_resolution value 'foo'") class TestConflictResolve(ConflictBase): def setUp(self): ConflictBase.setUp(self) self.parser.set_conflict_handler("resolve") self.parser.add_option("-v", "--version", action="callback", callback=self.show_version, help="show version") def test_conflict_resolve(self): v_opt = self.parser.get_option("-v") verbose_opt = self.parser.get_option("--verbose") version_opt = self.parser.get_option("--version") self.assert_(v_opt is version_opt) self.assert_(v_opt is not verbose_opt) self.assertEqual(v_opt._long_opts, ["--version"]) self.assertEqual(version_opt._short_opts, ["-v"]) self.assertEqual(version_opt._long_opts, ["--version"]) self.assertEqual(verbose_opt._short_opts, []) self.assertEqual(verbose_opt._long_opts, ["--verbose"]) def test_conflict_resolve_help(self): self.assertOutput(["-h"], """\ options: --verbose increment verbosity -h, --help show this help message and exit -v, --version show version """) def test_conflict_resolve_short_opt(self): self.assertParseOK(["-v"], {'verbose': None, 'show_version': 1}, []) def test_conflict_resolve_long_opt(self): self.assertParseOK(["--verbose"], {'verbose': 1}, []) def test_conflict_resolve_long_opts(self): self.assertParseOK(["--verbose", "--version"], {'verbose': 1, 'show_version': 1}, []) class TestConflictOverride(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.set_conflict_handler("resolve") 
self.parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", help="don't do anything") self.parser.add_option("--dry-run", "-n", action="store_const", const=42, dest="dry_run", help="dry run mode") def test_conflict_override_opts(self): opt = self.parser.get_option("--dry-run") self.assertEqual(opt._short_opts, ["-n"]) self.assertEqual(opt._long_opts, ["--dry-run"]) def test_conflict_override_help(self): self.assertOutput(["-h"], """\ options: -h, --help show this help message and exit -n, --dry-run dry run mode """) def test_conflict_override_args(self): self.assertParseOK(["-n"], {'dry_run': 42}, []) # -- Other testing. ---------------------------------------------------- _expected_help_basic = """\ usage: bar.py [options] options: -a APPLE throw APPLEs at basket -b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing -h, --help show this help message and exit """ _expected_help_long_opts_first = """\ usage: bar.py [options] options: -a APPLE throw APPLEs at basket --boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing --help, -h show this help message and exit """ _expected_help_title_formatter = """\ Usage ===== bar.py [options] options ======= -a APPLE throw APPLEs at basket --boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing --help, -h show this help message and exit """ _expected_help_short_lines = """\ usage: bar.py [options] options: -a APPLE throw APPLEs at basket -b NUM, --boo=NUM shout "boo!" 
NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing -h, --help show this help message and exit """ class TestHelp(BaseTest): def setUp(self): self.parser = self.make_parser(80) def make_parser(self, columns): options = [ make_option("-a", type="string", dest='a', metavar="APPLE", help="throw APPLEs at basket"), make_option("-b", "--boo", type="int", dest='boo', metavar="NUM", help= "shout \"boo!\" NUM times (in order to frighten away " "all the evil spirits that cause trouble and mayhem)"), make_option("--foo", action="append", type="string", dest='foo', help="store FOO in the foo list for later fooing"), ] os.environ['COLUMNS'] = str(columns) return InterceptingOptionParser(option_list=options) def assertHelpEquals(self, expected_output): save_argv = sys.argv[:] try: # Make optparse believe bar.py is being executed. sys.argv[0] = os.path.join("foo", "bar.py") self.assertOutput(["-h"], expected_output) finally: sys.argv[:] = save_argv def test_help(self): self.assertHelpEquals(_expected_help_basic) def test_help_old_usage(self): self.parser.set_usage("usage: %prog [options]") self.assertHelpEquals(_expected_help_basic) def test_help_long_opts_first(self): self.parser.formatter.short_first = 0 self.assertHelpEquals(_expected_help_long_opts_first) def test_help_title_formatter(self): self.parser.formatter = TitledHelpFormatter() self.assertHelpEquals(_expected_help_title_formatter) def test_wrap_columns(self): # Ensure that wrapping respects $COLUMNS environment variable. # Need to reconstruct the parser, since that's the only time # we look at $COLUMNS. self.parser = self.make_parser(60) self.assertHelpEquals(_expected_help_short_lines) def test_help_description_groups(self): self.parser.set_description( "This is the program description for %prog. 
%prog has " "an option group as well as single options.") group = OptionGroup( self.parser, "Dangerous Options", "Caution: use of these options is at your own risk. " "It is believed that some of them bite.") group.add_option("-g", action="store_true", help="Group option.") self.parser.add_option_group(group) self.assertHelpEquals("""\ usage: bar.py [options] This is the program description for bar.py. bar.py has an option group as well as single options. options: -a APPLE throw APPLEs at basket -b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing -h, --help show this help message and exit Dangerous Options: Caution: use of these options is at your own risk. It is believed that some of them bite. -g Group option. """) class TestMatchAbbrev(BaseTest): def test_match_abbrev(self): self.assertEqual(_match_abbrev("--f", {"--foz": None, "--foo": None, "--fie": None, "--f": None}), "--f") def test_match_abbrev_error(self): s = "--f" wordmap = {"--foz": None, "--foo": None, "--fie": None} self.assertRaises( _match_abbrev, (s, wordmap), None, BadOptionError, "ambiguous option: --f (--fie, --foo, --foz?)") def _testclasses(): mod = sys.modules[__name__] return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')] def suite(): suite = unittest.TestSuite() for testclass in _testclasses(): suite.addTest(unittest.makeSuite(testclass)) return suite def test_main(): test_support.run_suite(suite()) if __name__ == '__main__': unittest.main()
Python
#
Python
"""Text wrapping and filling. """ # Copyright (C) 1999-2001 Gregory P. Ward. # Copyright (C) 2002, 2003 Python Software Foundation. # Written by Greg Ward <gward@python.net> __revision__ = "$Id: textwrap.py 36379 2007-01-09 16:55:49Z cfbolz $" import string, re # Do the right thing with boolean values for all known Python versions # (so this module can be copied to projects that don't depend on Python # 2.3, e.g. Optik and Docutils). try: True, False except NameError: (True, False) = (1, 0) __all__ = ['TextWrapper', 'wrap', 'fill'] # Hardcode the recognized whitespace characters to the US-ASCII # whitespace characters. The main reason for doing this is that in # ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales # that character winds up in string.whitespace. Respecting # string.whitespace in those cases would 1) make textwrap treat 0xa0 the # same as any other whitespace char, which is clearly wrong (it's a # *non-breaking* space), 2) possibly cause problems with Unicode, # since 0xa0 is not in range(128). _whitespace = '\t\n\x0b\x0c\r ' class TextWrapper: """ Object for wrapping/filling text. The public interface consists of the wrap() and fill() methods; the other methods are just there for subclasses to override in order to tweak the default behaviour. If you want to completely replace the main wrapping algorithm, you'll probably have to override _wrap_chunks(). Several instance attributes control various aspects of wrapping: width (default: 70) the maximum width of wrapped lines (unless break_long_words is false) initial_indent (default: "") string that will be prepended to the first line of wrapped output. Counts towards the line's width. subsequent_indent (default: "") string that will be prepended to all lines save the first of wrapped output; also counts towards each line's width. expand_tabs (default: true) Expand tabs in input text to spaces before further processing. Each tab will become 1 .. 
8 spaces, depending on its position in its line. If false, each tab is treated as a single character. replace_whitespace (default: true) Replace all whitespace characters in the input text by spaces after tab expansion. Note that if expand_tabs is false and replace_whitespace is true, every tab will be converted to a single space! fix_sentence_endings (default: false) Ensure that sentence-ending punctuation is always followed by two spaces. Off by default because the algorithm is (unavoidably) imperfect. break_long_words (default: true) Break words longer than 'width'. If false, those words will not be broken, and some lines might be longer than 'width'. """ whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace)) unicode_whitespace_trans = {} uspace = ord(u' ') for x in map(ord, _whitespace): unicode_whitespace_trans[x] = uspace # This funky little regex is just the trick for splitting # text up into word-wrappable chunks. E.g. # "Hello there -- you goof-ball, use the -b option!" # splits into # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! # (after stripping out empty strings). wordsep_re = re.compile( r'(\s+|' # any whitespace r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash # XXX this is not locale- or charset-aware -- string.lowercase # is US-ASCII only (and therefore English-only) sentence_end_re = re.compile(r'[%s]' # lowercase letter r'[\.\!\?]' # sentence-ending punct. r'[\"\']?' 
# optional end-of-quote % string.lowercase) def __init__(self, width=70, initial_indent="", subsequent_indent="", expand_tabs=True, replace_whitespace=True, fix_sentence_endings=False, break_long_words=True): self.width = width self.initial_indent = initial_indent self.subsequent_indent = subsequent_indent self.expand_tabs = expand_tabs self.replace_whitespace = replace_whitespace self.fix_sentence_endings = fix_sentence_endings self.break_long_words = break_long_words # -- Private methods ----------------------------------------------- # (possibly useful for subclasses to override) def _munge_whitespace(self, text): """_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" becomes " foo bar baz". """ if self.expand_tabs: text = text.expandtabs() if self.replace_whitespace: if isinstance(text, str): text = text.translate(self.whitespace_trans) elif isinstance(text, unicode): text = text.translate(self.unicode_whitespace_trans) return text def _split(self, text): """_split(text : string) -> [string] Split the text to wrap into indivisible chunks. Chunks are not quite the same as words; see wrap_chunks() for full details. As an example, the text Look, goof-ball -- use the -b option! breaks into the following chunks: 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', 'option!' """ chunks = self.wordsep_re.split(text) chunks = filter(None, chunks) return chunks def _fix_sentence_endings(self, chunks): """_fix_sentence_endings(chunks : [string]) Correct for sentence endings buried in 'chunks'. Eg. when the original text contains "... foo.\nBar ...", munge_whitespace() and split() will convert that to [..., "foo.", " ", "Bar", ...] which has one too few spaces; this method simply changes the one space to two. 
""" i = 0 pat = self.sentence_end_re while i < len(chunks)-1: if chunks[i+1] == " " and pat.search(chunks[i]): chunks[i+1] = " " i += 2 else: i += 1 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): """_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line. """ space_left = max(width - cur_len, 1) # If we're allowed to break long words, then do so: put as much # of the next chunk onto the current line as will fit. if self.break_long_words: cur_line.append(reversed_chunks[-1][:space_left]) reversed_chunks[-1] = reversed_chunks[-1][space_left:] # Otherwise, we have to preserve the long word intact. Only add # it to the current line if there's nothing already there -- # that minimizes how much we violate the width constraint. elif not cur_line: cur_line.append(reversed_chunks.pop()) # If we're not allowed to break long words, and there's already # text on the current line, do nothing. Next time through the # main loop of _wrap_chunks(), we'll wind up here again, but # cur_len will be zero, so the next line will be entirely # devoted to the long word that we can't handle right now. def _wrap_chunks(self, chunks): """_wrap_chunks(chunks : [string]) -> [string] Wrap a sequence of text chunks and return a list of lines of length 'self.width' or less. (If 'break_long_words' is false, some lines may be longer than this.) Chunks correspond roughly to words and the whitespace between them: each chunk is indivisible (modulo 'break_long_words'), but a line break can come between any two chunks. Chunks should not have internal whitespace; ie. a chunk is either all whitespace or a "word". Whitespace chunks will be removed from the beginning and end of lines, but apart from that whitespace is preserved. 
""" lines = [] if self.width <= 0: raise ValueError("invalid width %r (must be > 0)" % self.width) # Arrange in reverse order so items can be efficiently popped # from a stack of chucks. chunks.reverse() while chunks: # Start the list of chunks that will make up the current line. # cur_len is just the length of all the chunks in cur_line. cur_line = [] cur_len = 0 # Figure out which static string will prefix this line. if lines: indent = self.subsequent_indent else: indent = self.initial_indent # Maximum width for this line. width = self.width - len(indent) # First chunk on line is whitespace -- drop it, unless this # is the very beginning of the text (ie. no lines started yet). if chunks[-1].strip() == '' and lines: del chunks[-1] while chunks: l = len(chunks[-1]) # Can at least squeeze this chunk onto the current line. if cur_len + l <= width: cur_line.append(chunks.pop()) cur_len += l # Nope, this line is full. else: break # The current line is full, and the next chunk is too big to # fit on *any* line (not just this one). if chunks and len(chunks[-1]) > width: self._handle_long_word(chunks, cur_line, cur_len, width) # If the last chunk on this line is all whitespace, drop it. if cur_line and cur_line[-1].strip() == '': del cur_line[-1] # Convert current line back to a string and store it in list # of all lines (return value). if cur_line: lines.append(indent + ''.join(cur_line)) return lines # -- Public interface ---------------------------------------------- def wrap(self, text): """wrap(text : string) -> [string] Reformat the single paragraph in 'text' so it fits in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. 
""" text = self._munge_whitespace(text) chunks = self._split(text) if self.fix_sentence_endings: self._fix_sentence_endings(chunks) return self._wrap_chunks(chunks) def fill(self, text): """fill(text : string) -> string Reformat the single paragraph in 'text' to fit in lines of no more than 'self.width' columns, and return a new string containing the entire wrapped paragraph. """ return "\n".join(self.wrap(text)) # -- Convenience interface --------------------------------------------- def wrap(text, width=70, **kwargs): """Wrap a single paragraph of text, returning a list of wrapped lines. Reformat the single paragraph in 'text' so it fits in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.wrap(text) def fill(text, width=70, **kwargs): """Fill a single paragraph of text, returning a new string. Reformat the single paragraph in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped paragraph. As with wrap(), tabs are expanded and other whitespace characters converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.fill(text) # -- Loosely related functionality ------------------------------------- def dedent(text): """dedent(text : string) -> string Remove any whitespace than can be uniformly removed from the left of every line in `text`. This can be used e.g. to make triple-quoted strings line up with the left edge of screen/whatever, while still presenting it in the source code in indented form. For example: def test(): # end first line with \ to avoid the empty line! 
s = '''\ hello world ''' print repr(s) # prints ' hello\n world\n ' print repr(dedent(s)) # prints 'hello\n world\n' """ lines = text.expandtabs().split('\n') margin = None for line in lines: content = line.lstrip() if not content: continue indent = len(line) - len(content) if margin is None: margin = indent else: margin = min(margin, indent) if margin is not None and margin > 0: for i in range(len(lines)): lines[i] = lines[i][margin:] return '\n'.join(lines)
Python
""" compatibility modules (taken from 2.4.4) """
Python
#
# a generic conversion serializer
#

from py.xml import escape

class SimpleUnicodeVisitor(object):
    """ recursive visitor to write unicode.

    Serializes a py.xml Tag tree by repeatedly calling the supplied
    ``write`` callable with unicode fragments.  Dispatch is by the
    node's class name (see visit()).
    """
    def __init__(self, write, indent=0, curindent=0, shortempty=True):
        # write: callable receiving each unicode fragment (e.g. list.append)
        self.write = write
        # per-class dispatch cache filled lazily by visit()
        self.cache = {}
        self.visited = {}   # for detection of recursion
        # indent: spaces added per nesting level; curindent: current column
        self.indent = indent
        self.curindent = curindent
        self.parents = []
        self.shortempty = shortempty  # short empty tags or not

    def visit(self, node):
        """ dispatcher on node's class/bases name.

        Walks the node's MRO looking for a visitor method with the same
        name as the (sub)class; falls back to self.object.  The lookup
        result is cached per concrete class.
        """
        cls = node.__class__
        try:
            visitmethod = self.cache[cls]
        except KeyError:
            for subclass in cls.__mro__:
                visitmethod = getattr(self, subclass.__name__, None)
                if visitmethod is not None:
                    break
            else:
                visitmethod = self.object
            self.cache[cls] = visitmethod
        visitmethod(node)

    def object(self, obj):
        # fallback: emit the xml-escaped unicode form of any object
        #self.write(obj)
        self.write(escape(unicode(obj)))

    def raw(self, obj):
        # raw objects are boxes for pre-escaped markup: emit verbatim
        self.write(obj.uniobj)

    def list(self, obj):
        # guard against a list appearing twice in the tree (cycle)
        assert id(obj) not in self.visited
        self.visited[id(obj)] = 1
        # Python 2 idiom: map used purely for the visit side effect
        map(self.visit, obj)

    def Tag(self, tag):
        assert id(tag) not in self.visited
        # record the enclosing tag (or None at the root) on the node
        try:
            tag.parent = self.parents[-1]
        except IndexError:
            tag.parent = None
        self.visited[id(tag)] = 1
        tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
        # start block-level tags on a fresh, indented line
        if self.curindent and not self._isinline(tagname):
            self.write("\n" + u' ' * self.curindent)
        if tag:
            # non-empty tag: open, recurse into children, close
            self.curindent += self.indent
            self.write(u'<%s%s>' % (tagname, self.attributes(tag)))
            self.parents.append(tag)
            map(self.visit, tag)
            self.parents.pop()
            self.write(u'</%s>' % tagname)
            self.curindent -= self.indent
        else:
            # empty tag: either <name/> or <name></name>
            nameattr = tagname+self.attributes(tag)
            if self._issingleton(tagname):
                self.write(u'<%s/>' % (nameattr,))
            else:
                self.write(u'<%s></%s>' % (nameattr, tagname))

    def attributes(self, tag):
        # serialize attributes, sorted by name for stable output,
        # followed by any style attribute
        attrlist = dir(tag.attr)
        attrlist.sort()
        l = []
        for name in attrlist:
            res = self.repr_attribute(tag.attr, name)
            if res is not None:
                l.append(res)
        l.extend(self.getstyle(tag))
        return u"".join(l)

    def repr_attribute(self, attrs, name):
        # returns u' name="value"' or None for dunder names;
        # a trailing underscore is stripped (class_ -> class)
        if name[:2] != '__':
            value = getattr(attrs, name)
            if name.endswith('_'):
                name = name[:-1]
            return u' %s="%s"' % (name, escape(unicode(value)))

    def getstyle(self, tag):
        """ return attribute list suitable for styling. """
        try:
            styledict = tag.style.__dict__
        except AttributeError:
            # tag has no style object -> no style attribute
            return []
        else:
            stylelist = [x+': ' + y for x,y in styledict.items()]
            return [u' style="%s"' % u'; '.join(stylelist)]

    def _issingleton(self, tagname):
        """can (and will) be overridden in subclasses"""
        return self.shortempty

    def _isinline(self, tagname):
        """can (and will) be overridden in subclasses"""
        return False
Python
#
Python
import re class _escape: def __init__(self): self.escape = { u'"' : u'&quot;', u'<' : u'&lt;', u'>' : u'&gt;', u'&' : u'&amp;', u"'" : u'&apos;', } self.charef_rex = re.compile(u"|".join(self.escape.keys())) def _replacer(self, match): return self.escape[match.group(0)] def __call__(self, ustring): """ xml-escape the given unicode string. """ return self.charef_rex.sub(self._replacer, ustring) escape = _escape()
Python
""" """ from py.xml import Namespace, Tag from py.__.xmlobj.visit import SimpleUnicodeVisitor class HtmlVisitor(SimpleUnicodeVisitor): single = dict([(x, 1) for x in ('br,img,area,param,col,hr,meta,link,base,' 'input,frame').split(',')]) inline = dict([(x, 1) for x in ('a abbr acronym b basefont bdo big br cite code dfn em font ' 'i img input kbd label q s samp select small span strike ' 'strong sub sup textarea tt u var'.split(' '))]) def repr_attribute(self, attrs, name): if name == 'class_': value = getattr(attrs, name) if value is None: return return super(HtmlVisitor, self).repr_attribute(attrs, name) def _issingleton(self, tagname): return tagname in self.single def _isinline(self, tagname): return tagname in self.inline class HtmlTag(Tag): def unicode(self, indent=2): l = [] HtmlVisitor(l.append, indent, shortempty=False).visit(self) return u"".join(l) # exported plain html namespace class html(Namespace): __tagclass__ = HtmlTag __stickyname__ = True __tagspec__ = dict([(x,1) for x in ( 'a,abbr,acronym,address,applet,area,b,bdo,big,blink,' 'blockquote,body,br,button,caption,center,cite,code,col,' 'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,' 'fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,' 'i,iframe,img,input,ins,kbd,label,legend,li,link,listing,' 'map,marquee,menu,meta,multicol,nobr,noembed,noframes,' 'noscript,object,ol,optgroup,option,p,pre,q,s,script,' 'select,small,span,strike,strong,style,sub,sup,table,' 'tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,' 'base,basefont,frame,hr,isindex,param,samp,var' ).split(',') if x]) class Style(object): def __init__(self, **kw): for x, y in kw.items(): x = x.replace('_', '-') setattr(self, x, y)
Python
""" generic (and pythonic :-) xml tag and namespace objects """ class Tag(list): class Attr(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) def __init__(self, *args, **kwargs): super(Tag, self).__init__(args) self.attr = self.Attr(**kwargs) def __unicode__(self): return self.unicode(indent=0) def unicode(self, indent=2): from py.__.xmlobj.visit import SimpleUnicodeVisitor l = [] SimpleUnicodeVisitor(l.append, indent).visit(self) return u"".join(l) def __repr__(self): name = self.__class__.__name__ return "<%r tag object %d>" % (name, id(self)) class raw(object): """just a box that can contain a unicode string that will be included directly in the output""" def __init__(self, uniobj): self.uniobj = uniobj # the generic xml namespace # provides Tag classes on the fly optionally checking for # a tagspecification class NamespaceMetaclass(type): def __getattr__(self, name): if name[:1] == '_': raise AttributeError(name) if self == Namespace: raise ValueError("Namespace class is abstract") tagspec = self.__tagspec__ if tagspec is not None and name not in tagspec: raise AttributeError(name) classattr = {} if self.__stickyname__: classattr['xmlname'] = name cls = type(name, (self.__tagclass__,), classattr) setattr(self, name, cls) return cls class Namespace(object): __tagspec__ = None __tagclass__ = Tag __metaclass__ = NamespaceMetaclass __stickyname__ = False
Python
""" small and mean xml/html generation """
Python
#pythonexecutables = ('python2.2', 'python2.3',) #pythonexecutable = 'python2.2' # in the future we want to be able to say here: #def setup_module(extpy): # mod = extpy.resolve() # mod.module = 23 # directory = pypath.root.dirpath() # default values for options (modified from cmdline) verbose = 0 nocapture = False collectonly = False exitfirst = False fulltrace = False showlocals = False nomagic = False import py Option = py.test.config.Option option = py.test.config.addoptions("execnet options", Option('-S', '', action="store", dest="sshtarget", default=None, help=("target to run tests requiring ssh, e.g. " "user@codespeak.net")), Option('', '--apigenpath', action="store", dest="apigenpath", default="../apigen", type="string", help="relative path to apigen doc output location (relative from py/)"), Option('', '--docpath', action='store', dest='docpath', default="doc", type='string', help="relative path to doc output location (relative from py/)"), ) dist_rsync_roots = ['.']
Python
""" module for win-specific local path stuff (implementor needed :-) """ import os import py from py.__.path.local.common import Stat class WinMixin: def _makestat(self, statresult): return Stat(self, statresult) def chmod(self, mode, rec=0): """ change permissions to the given mode. If mode is an integer it directly encodes the os-specific modes. if rec is True perform recursively. (xxx if mode is a string then it specifies access rights in '/bin/chmod' style, e.g. a+r). """ if not isinstance(mode, int): raise TypeError("mode %r must be an integer" % (mode,)) if rec: for x in self.visit(rec=rec): self._callex(os.chmod, str(x), mode) self._callex(os.chmod, str(self), mode) def remove(self, rec=1): """ remove a file or directory (or a directory tree if rec=1). """ if self.check(dir=1, link=0): if rec: # force remove of readonly files on windows self.chmod(0700, rec=1) self._callex(py.std.shutil.rmtree, self.strpath) else: self._callex(os.rmdir, self.strpath) else: self.chmod(0700) self._callex(os.remove, self.strpath)
Python
""" specialized local path implementation. This Path implementation offers some methods like chmod(), owner() and so on that may only make sense on unix. """ from __future__ import generators import sys, os, stat, re, atexit import py from py.__.path import common iswin32 = sys.platform == "win32" if iswin32: from py.__.path.local.win import WinMixin as PlatformMixin else: from py.__.path.local.posix import PosixMixin as PlatformMixin class LocalPath(common.FSPathBase, PlatformMixin): """ Local path implementation offering access/modification methods similar to os.path. """ _path_cache = {} sep = os.sep class Checkers(common.FSCheckers): def _stat(self): try: return self._statcache except AttributeError: try: self._statcache = self.path.stat() except py.error.ELOOP: self._statcache = self.path.lstat() return self._statcache def dir(self): return stat.S_ISDIR(self._stat().mode) def file(self): return stat.S_ISREG(self._stat().mode) def exists(self): return self._stat() def link(self): st = self.path.lstat() return stat.S_ISLNK(st.mode) def __new__(cls, path=None): """ Initialize and return a local Path instance. Path can be relative to the current directory. If it is None then the current working directory is taken. Note that Path instances always carry an absolute path. Note also that passing in a local path object will simply return the exact same path object. Use new() to get a new copy. """ if isinstance(path, common.FSPathBase): if path.__class__ == cls: return path path = path.strpath # initialize the path self = object.__new__(cls) if not path: self.strpath = os.getcwd() elif isinstance(path, str): self.strpath = os.path.abspath(os.path.normpath(str(path))) else: raise ValueError( "can only pass None, Path instances " "or non-empty strings to LocalPath") assert isinstance(self.strpath, str) return self def __hash__(self): return hash(self.strpath) def computehash(self, hashtype="md5", chunksize=524288): """ return hexdigest of hashvalue for this file. 
""" hash = self._gethashinstance(hashtype) f = self.open('rb') try: while 1: buf = f.read(chunksize) if not buf: return hash.hexdigest() hash.update(buf) finally: f.close() def new(self, **kw): """ create a modified version of this path. the following keyword arguments modify various path parts: a:/some/path/to/a/file.ext || drive |-------------| dirname |------| basename |--| purebasename |--| ext """ obj = object.__new__(self.__class__) drive, dirname, basename, purebasename,ext = self._getbyspec( "drive,dirname,basename,purebasename,ext") if 'basename' in kw: if 'purebasename' in kw or 'ext' in kw: raise ValueError("invalid specification %r" % kw) else: pb = kw.setdefault('purebasename', purebasename) try: ext = kw['ext'] except KeyError: pass else: if ext and not ext.startswith('.'): ext = '.' + ext kw['basename'] = pb + ext kw.setdefault('drive', drive) kw.setdefault('dirname', dirname) kw.setdefault('sep', self.sep) obj.strpath = os.path.normpath( "%(drive)s%(dirname)s%(sep)s%(basename)s" % kw) return obj def _getbyspec(self, spec): """ return a sequence of specified path parts. 'spec' is a comma separated string containing path part names. according to the following convention: a:/some/path/to/a/file.ext || drive |-------------| dirname |------| basename |--| purebasename |--| ext """ res = [] parts = self.strpath.split(self.sep) args = filter(None, spec.split(',') ) append = res.append for name in args: if name == 'drive': append(parts[0]) elif name == 'dirname': append(self.sep.join(['']+parts[1:-1])) else: basename = parts[-1] if name == 'basename': append(basename) else: i = basename.rfind('.') if i == -1: purebasename, ext = basename, '' else: purebasename, ext = basename[:i], basename[i:] if name == 'purebasename': append(purebasename) elif name == 'ext': append(ext) else: raise ValueError, "invalid part specification %r" % name return res def join(self, *args, **kwargs): """ return a new path by appending all 'args' as path components. 
if abs=1 is used restart from root if any of the args is an absolute path. """ if not args: return self strpath = self.strpath sep = self.sep strargs = [str(x) for x in args] if kwargs.get('abs', 0): for i in range(len(strargs)-1, -1, -1): if os.path.isabs(strargs[i]): strpath = strargs[i] strargs = strargs[i+1:] break for arg in strargs: arg = arg.strip(sep) if py.std.sys.platform == 'win32': # allow unix style paths even on windows. arg = arg.strip('/') arg = arg.replace('/', sep) if arg: if not strpath.endswith(sep): strpath += sep strpath += arg obj = self.new() obj.strpath = os.path.normpath(strpath) return obj def __eq__(self, other): s1 = str(self) s2 = str(other) if iswin32: s1 = s1.lower() s2 = s2.lower() return s1 == s2 def open(self, mode='r'): """ return an opened file with the given mode. """ return self._callex(open, self.strpath, mode) def listdir(self, fil=None, sort=None): """ list directory contents, possibly filter by the given fil func and possibly sorted. """ if isinstance(fil, str): fil = common.fnmatch(fil) res = [] for name in self._callex(os.listdir, self.strpath): childurl = self.join(name) if fil is None or fil(childurl): res.append(childurl) if callable(sort): res.sort(sort) elif sort: res.sort() return res def size(self): """ return size of the underlying file object """ return self.stat().size def mtime(self): """ return last modification time of the path. 
""" return self.stat().mtime def copy(self, target, archive=False): """ copy path to target.""" assert not archive, "XXX archive-mode not supported" if self.check(file=1): if target.check(dir=1): target = target.join(self.basename) assert self!=target copychunked(self, target) else: target.ensure(dir=1) def rec(p): return p.check(link=0) for x in self.visit(rec=rec): relpath = x.relto(self) newx = target.join(relpath) if x.check(link=1): newx.mksymlinkto(x.readlink()) elif x.check(file=1): copychunked(x, newx) elif x.check(dir=1): newx.ensure(dir=1) def rename(self, target): """ rename this path to target. """ return self._callex(os.rename, str(self), str(target)) def dump(self, obj, bin=1): """ pickle object into path location""" f = self.open('wb') try: self._callex(py.std.cPickle.dump, obj, f, bin) finally: f.close() def mkdir(self, *args): """ create & return the directory joined with args. """ p = self.join(*args) self._callex(os.mkdir, str(p)) return p def write(self, content, mode='wb'): """ write string content into path. """ s = str(content) f = self.open(mode) try: f.write(s) finally: f.close() def _ensuredirs(self): parent = self.dirpath() if parent == self: return self if parent.check(dir=0): parent._ensuredirs() if self.check(dir=0): try: self.mkdir() except py.error.EEXIST: # race condition: file/dir created by another thread/process. # complain if it is not a dir if self.check(dir=0): raise return self def ensure(self, *args, **kwargs): """ ensure that an args-joined path exists (by default as a file). if you specify a keyword argument 'dir=True' then the path is forced to be a directory path. """ p = self.join(*args) if kwargs.get('dir', 0): return p._ensuredirs() else: p.dirpath()._ensuredirs() if not p.check(file=1): p.write("") return p def stat(self): """ Return an os.stat() tuple. """ stat = self._callex(os.stat, self.strpath) return self._makestat(stat) def lstat(self): """ Return an os.lstat() tuple. 
""" return self._makestat(self._callex(os.lstat, self.strpath)) # xlocal implementation def setmtime(self, mtime=None): """ set modification time for the given path. if 'mtime' is None (the default) then the file's mtime is set to current time. Note that the resolution for 'mtime' is platform dependent. """ if mtime is None: return self._callex(os.utime, self.strpath, mtime) try: return self._callex(os.utime, self.strpath, (-1, mtime)) except py.error.EINVAL: return self._callex(os.utime, self.strpath, (self.atime(), mtime)) def chdir(self): """ change directory to self and return old current directory """ old = self.__class__() self._callex(os.chdir, self.strpath) return old def realpath(self): """ return a new path which contains no symbolic links.""" return self.__class__(os.path.realpath(self.strpath)) def atime(self): """ return last access time of the path. """ return self.stat().atime def __repr__(self): return 'local(%r)' % self.strpath def __str__(self): """ return string representation of the Path. """ return self.strpath def pypkgpath(self, pkgname=None): """ return the path's package path by looking for the given pkgname. If pkgname is None then look for the last directory upwards which still contains an __init__.py. Return None if a pkgpath can not be determined. """ pkgpath = None for parent in self.parts(reverse=True): if pkgname is None: if parent.check(file=1): continue if parent.join('__init__.py').check(): pkgpath = parent continue return pkgpath else: if parent.basename == pkgname: return parent return pkgpath def _prependsyspath(self, path): s = str(path) if s != sys.path[0]: #print "prepending to sys.path", s sys.path.insert(0, s) def pyimport(self, modname=None, ensuresyspath=True): """ return path as an imported python module. if modname is None, look for the containing package and construct an according module name. The module will be put/looked up in sys.modules. 
""" if not self.check(): raise py.error.ENOENT(self) #print "trying to import", self pkgpath = None if modname is None: #try: # return self._module #except AttributeError: # pass pkgpath = self.pypkgpath() if pkgpath is not None: if ensuresyspath: self._prependsyspath(pkgpath.dirpath()) pkg = __import__(pkgpath.basename, None, None, []) if hasattr(pkg, '__package__'): modname = pkg.__package__.getimportname(self) assert modname is not None, "package %s doesn't know %s" % ( pkg.__name__, self) else: names = self.new(ext='').relto(pkgpath.dirpath()) names = names.split(self.sep) modname = ".".join(names) else: # no package scope, still make it possible if ensuresyspath: self._prependsyspath(self.dirpath()) modname = self.purebasename mod = __import__(modname, None, None, ['__doc__']) #self._module = mod return mod else: try: return sys.modules[modname] except KeyError: # we have a custom modname, do a pseudo-import mod = py.std.new.module(modname) mod.__file__ = str(self) sys.modules[modname] = mod try: execfile(str(self), mod.__dict__) except: del sys.modules[modname] raise return mod def _getpymodule(self): """resolve this path to a module python object. """ if self.ext != '.c': return super(LocalPath, self)._getpymodule() from py.__.misc.buildcmodule import make_module_from_c mod = make_module_from_c(self) return mod def _getpycodeobj(self): """ read the path and compile it to a code object. 
""" dotpy = self.check(ext='.py') if dotpy: my_magic = py.std.imp.get_magic() my_timestamp = int(self.mtime()) if __debug__: pycfile = self + 'c' else: pycfile = self + 'o' try: f = pycfile.open('rb') try: header = f.read(8) if len(header) == 8: magic, timestamp = py.std.struct.unpack('<4si', header) if magic == my_magic and timestamp == my_timestamp: co = py.std.marshal.load(f) path1 = co.co_filename path2 = str(self) if path1 == path2: return co try: if os.path.samefile(path1, path2): return co except (OSError, # probably path1 not found AttributeError): # samefile() not available pass finally: f.close() except py.error.Error: pass s = self.read(mode='rU') + '\n' codeobj = compile(s, str(self), 'exec', generators.compiler_flag) if dotpy: try: f = pycfile.open('wb') f.write(py.std.struct.pack('<4si', 'TEMP', -1)) # fixed below py.std.marshal.dump(codeobj, f) f.flush() f.seek(0) f.write(py.std.struct.pack('<4si', my_magic, my_timestamp)) f.close() except py.error.Error: pass return codeobj def sysexec(self, *argv): """ return stdout-put from executing a system child process, where the self path points to the binary (XXX or script) to be executed. Note that this process is directly invoked and not through a system shell. """ from py.compat.subprocess import Popen, PIPE argv = map(str, argv) proc = Popen([str(self)] + list(argv), stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() ret = proc.wait() if ret != 0: raise py.process.cmdexec.Error(ret, ret, str(self), stdout, stderr,) return stdout def sysfind(cls, name, checker=None): """ return a path object found by looking at the systems underlying PATH specification. If the checker is not None it will be invoked to filter matching paths. If a binary cannot be found, None is returned Note: This is probably not working on plain win32 systems but may work on cygwin. 
""" if os.path.isabs(name): p = py.path.local(name) if p.check(file=1): return p else: if py.std.sys.platform == 'win32': paths = py.std.os.environ['Path'].split(';') try: systemroot = os.environ['SYSTEMROOT'] except KeyError: pass else: paths = [re.sub('%SystemRoot%', systemroot, path) for path in paths] tryadd = '', '.exe', '.com', '.bat' # XXX add more? else: paths = py.std.os.environ['PATH'].split(':') tryadd = ('',) for x in paths: for addext in tryadd: p = py.path.local(x).join(name, abs=True) + addext try: if p.check(file=1): if checker: if not checker(p): continue return p except py.error.EACCES: pass return None sysfind = classmethod(sysfind) def _gethomedir(cls): try: x = os.environ['HOME'] except KeyError: x = os.environ['HOMEPATH'] return cls(x) _gethomedir = classmethod(_gethomedir) #""" #special class constructors for local filesystem paths #""" def get_temproot(cls): """ return the system's temporary directory (where tempfiles are usually created in) """ return py.path.local(py.std.tempfile.gettempdir()) get_temproot = classmethod(get_temproot) def mkdtemp(cls): """ return a Path object pointing to a fresh new temporary directory (which we created ourself). """ import tempfile tries = 10 for i in range(tries): dname = tempfile.mktemp() dpath = cls(tempfile.mktemp()) try: dpath.mkdir() except (py.error.EEXIST, py.error.EPERM, py.error.EACCES): continue return dpath raise py.error.ENOENT(dpath, "could not create tempdir, %d tries" % tries) mkdtemp = classmethod(mkdtemp) def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, lock_timeout = 172800): # two days """ return unique directory with a number greater than the current maximum one. The number is assumed to start directly after prefix. if keep is true directories with a number less than (maxnum-keep) will be removed. 
""" if rootdir is None: rootdir = cls.get_temproot() def parse_num(path): """ parse the number out of a path (if it matches the prefix) """ bn = path.basename if bn.startswith(prefix): try: return int(bn[len(prefix):]) except ValueError: pass # compute the maximum number currently in use with the # prefix lastmax = None while True: maxnum = -1 for path in rootdir.listdir(): num = parse_num(path) if num is not None: maxnum = max(maxnum, num) # make the new directory try: udir = rootdir.mkdir(prefix + str(maxnum+1)) except py.error.EEXIST: # race condition: another thread/process created the dir # in the meantime. Try counting again if lastmax == maxnum: raise lastmax = maxnum continue break # put a .lock file in the new directory that will be removed at # process exit lockfile = udir.join('.lock') mypid = os.getpid() if hasattr(lockfile, 'mksymlinkto'): lockfile.mksymlinkto(str(mypid)) else: lockfile.write(str(mypid)) def try_remove_lockfile(): # in a fork() situation, only the last process should # remove the .lock, otherwise the other processes run the # risk of seeing their temporary dir disappear. For now # we remove the .lock in the parent only (i.e. we assume # that the children finish before the parent). if os.getpid() != mypid: return try: lockfile.remove() except py.error.Error: pass atexit.register(try_remove_lockfile) # prune old directories if keep: for path in rootdir.listdir(): num = parse_num(path) if num is not None and num <= (maxnum - keep): lf = path.join('.lock') try: t1 = lf.lstat().mtime t2 = lockfile.lstat().mtime if abs(t2-t1) < lock_timeout: continue # skip directories still locked except py.error.Error: pass # assume that it means that there is no 'lf' try: path.remove(rec=1) except py.error.Error: pass # make link... 
try: username = os.environ['USER'] #linux, et al except KeyError: try: username = os.environ['USERNAME'] #windows except KeyError: username = 'current' src = str(udir) dest = src[:src.rfind('-')] + '-' + username try: os.unlink(dest) except OSError: pass try: os.symlink(src, dest) except (OSError, AttributeError): # AttributeError on win32 pass return udir make_numbered_dir = classmethod(make_numbered_dir) def copychunked(src, dest): chunksize = 524288 # half a meg of bytes fsrc = src.open('rb') try: fdest = dest.open('wb') try: while 1: buf = fsrc.read(chunksize) if not buf: break fdest.write(buf) finally: fdest.close() finally: fsrc.close()
# Python
#
# Python
""" module to access local filesystem pathes
(mostly filename manipulations but also file operations)
"""
import os, sys, stat
import py

#__________________________________________________________
#
# Local Path Posix Mixin
#__________________________________________________________
from py.__.path.local.common import Stat

class PosixStat(Stat):
    # POSIX-specific stat wrapper: adds name-based owner/group lookup
    # on top of the numeric uid/gid fields provided by Stat.
    def owner(self):
        # py.std.pwd is the stdlib 'pwd' module accessed lazily;
        # _callex translates OS errors into py.error exceptions.
        entry = self.path._callex(py.std.pwd.getpwuid, self.uid)
        return entry[0]   # pw_name
    owner = property(owner, None, None, "owner of path")

    def group(self):
        """ return group name of file. """
        entry = self.path._callex(py.std.grp.getgrgid, self.gid)
        return entry[0]   # gr_name
    group = property(group)

class PosixMixin(object):
    # Mixed into LocalPath on non-win32 platforms; an instance is
    # expected to be a local path object (provides visit/_callex/strpath).
    def _makestat(self, statresult):
        # Wrap a raw os.stat() result in the POSIX-aware Stat subclass.
        return PosixStat(self, statresult)

    def _deprecated(self, name):
        # stacklevel=3: skip this helper and the deprecated method so the
        # warning points at the user's call site.
        py.std.warnings.warn("'path.%s()' is deprecated, use "
                             "'path.stat().%s' instead." % (name,name),
                             DeprecationWarning, stacklevel=3)

    # an instance needs to be a local path instance
    def owner(self):
        """ return owner name of file. """
        self._deprecated('owner')
        return self.stat().owner

    def group(self):
        """ return group name of file. """
        self._deprecated('group')
        return self.stat().group

    def mode(self):
        """ return permission mode of the path object """
        self._deprecated('mode')
        return self.stat().mode

    def chmod(self, mode, rec=0):
        """ change permissions to the given mode. If mode is an
            integer it directly encodes the os-specific modes.
            if rec is True perform recursively.
            (xxx if mode is a string then it specifies access rights
            in '/bin/chmod' style, e.g. a+r).
        """
        if not isinstance(mode, int):
            raise TypeError("mode %r must be an integer" % (mode,))
        if rec:
            # children first, then the path itself (below)
            for x in self.visit(rec=rec):
                self._callex(os.chmod, str(x), mode)
        self._callex(os.chmod, str(self), mode)

    def chown(self, user, group, rec=0):
        """ change ownership to the given user and group.
            user and group may be specified by a number or
            by a name.  if rec is True change ownership
            recursively.
        """
        uid = getuserid(user)
        gid = getgroupid(group)
        if rec:
            # Symlinks are skipped both in descent and per-entry:
            # os.chown would operate on the link *target*.
            # NOTE(review): os.lchown is not used here — confirm links
            # are intentionally left untouched.
            for x in self.visit(rec=lambda x: x.check(link=0)):
                if x.check(link=0):
                    self._callex(os.chown, str(x), uid, gid)
        self._callex(os.chown, str(self), uid, gid)

    def readlink(self):
        """ return value of a symbolic link. """
        return self._callex(os.readlink, self.strpath)

    def mklinkto(self, oldname):
        """ posix style hard link to another name. """
        self._callex(os.link, str(oldname), str(self))

    def mksymlinkto(self, value, absolute=1):
        """ create a symbolic link with the given value
            (pointing to another name).

            if absolute is false the link target is expressed
            relative to the common base of both paths.
        """
        if absolute:
            self._callex(os.symlink, str(value), self.strpath)
        else:
            base = self.common(value)
            # with posix local paths '/' is always a common base
            relsource = self.__class__(value).relto(base)
            reldest = self.relto(base)
            # climb from the link location up to the common base
            # with one '..' per separator, then descend to the source
            n = reldest.count(self.sep)
            target = self.sep.join(('..', )*n + (relsource, ))
            self._callex(os.symlink, target, self.strpath)

    def remove(self, rec=1):
        """ remove a file or directory (or a directory
            tree if rec=1).
        """
        # a symlink to a directory is removed as a plain entry
        # (dir=1, link=0 check), never recursed into
        if self.check(dir=1, link=0):
            if rec:
                self._callex(py.std.shutil.rmtree, self.strpath)
            else:
                self._callex(os.rmdir, self.strpath)
        else:
            self._callex(os.remove, self.strpath)

def getuserid(user):
    """ return the numeric uid for 'user' (name or uid);
        integers pass through unchanged. """
    import pwd
    if isinstance(user, int):
        return user
    entry = pwd.getpwnam(user)
    return entry[2]   # pw_uid

def getgroupid(group):
    """ return the numeric gid for 'group' (name or gid);
        integers pass through unchanged. """
    import grp
    if isinstance(group, int):
        return group
    entry = grp.getgrnam(group)
    return entry[2]   # gr_gid
# Python
import warnings


class Stat(object):
    """ Wrapper around an os.stat()/os.lstat() result.

        Exposes each stat field both under a plain name (``size``,
        ``mtime``, ``uid``, ...) and under the historical
        ``st_``-prefixed name; the latter emits a DeprecationWarning
        when accessed.  Fields absent from the underlying result
        (platform-dependent ones like ``blksize``) read as None.
    """
    def __init__(self, path, osstatresult):
        # path: the path object this stat result belongs to
        # osstatresult: the raw os.stat()/os.lstat() result being wrapped
        self.path = path
        self._osstatresult = osstatresult


def _install_stat_properties(cls):
    """ attach one plain and one deprecated ``st_``-prefixed property
        per stat field to cls.  Runs once at import time; replaces the
        previous exec-over-string-template code generation with plain
        closures so the module stays readable (and Python-3 parseable).
    """
    def make_fget(attrname):
        # closure binds attrname per field; getattr default makes
        # missing platform-specific fields read as None
        def fget(self):
            return getattr(self._osstatresult, attrname, None)
        return fget

    def make_deprecated_fget(name, attrname):
        def fget(self):
            # stacklevel=2: point the warning at the attribute access site
            warnings.warn("statresult.st_%s is deprecated, use "
                          "statresult.%s instead." % (name, name),
                          DeprecationWarning, stacklevel=2)
            return getattr(self._osstatresult, attrname, None)
        return fget

    for name in ('atime blksize blocks ctime dev gid '
                 'ino mode mtime nlink rdev size uid').split():
        attrname = "st_" + name
        setattr(cls, name, property(make_fget(attrname)))
        setattr(cls, attrname, property(make_deprecated_fget(name, attrname)))

_install_stat_properties(Stat)
del _install_stat_properties
# Python
#
# Python
import py from py.__.path.testing import common def setuptestfs(path): if path.join('samplefile').check(): return #print "setting up test fs for", repr(path) samplefile = path.ensure('samplefile') samplefile.write('samplefile\n') execfile = path.ensure('execfile') execfile.write('x=42') execfilepy = path.ensure('execfile.py') execfilepy.write('x=42') d = {1:2, 'hello': 'world', 'answer': 42} path.ensure('samplepickle').dump(d) sampledir = path.ensure('sampledir', dir=1) sampledir.ensure('otherfile') otherdir = path.ensure('otherdir', dir=1) otherdir.ensure('__init__.py') module_a = otherdir.ensure('a.py') module_a.write('from b import stuff as result\n') module_b = otherdir.ensure('b.py') module_b.write('stuff="got it"\n') module_c = otherdir.ensure('c.py') module_c.write('''import py; py.magic.autopath() import otherdir.a value = otherdir.a.result ''') module_d = otherdir.ensure('d.py') module_d.write('''import py; py.magic.autopath() from otherdir import a value2 = a.result ''') class CommonFSTests(common.CommonPathTests): root = None # subclasses have to provide a current 'root' attribute def test_join_div_operator(self): newpath = self.root / '/sampledir' / '/test//' newpath2 = self.root.join('sampledir', 'test') assert newpath == newpath2 def test_ext(self): newpath = self.root.join('sampledir.ext') assert newpath.ext == '.ext' newpath = self.root.join('sampledir') assert not newpath.ext def test_purebasename(self): newpath = self.root.join('samplefile.py') assert newpath.purebasename == 'samplefile' def test_multiple_parts(self): newpath = self.root.join('samplefile.py') dirname, purebasename, basename, ext = newpath._getbyspec( 'dirname,purebasename,basename,ext') assert str(self.root).endswith(dirname) # be careful with win32 'drive' assert purebasename == 'samplefile' assert basename == 'samplefile.py' assert ext == '.py' def test_dotted_name_ext(self): newpath = self.root.join('a.b.c') ext = newpath.ext assert ext == '.c' assert newpath.ext == '.c' def 
test_newext(self): newpath = self.root.join('samplefile.py') newext = newpath.new(ext='.txt') assert newext.basename == "samplefile.txt" assert newext.purebasename == "samplefile" def test_readlines(self): fn = self.root.join('samplefile') contents = fn.readlines() assert contents == ['samplefile\n'] def test_readlines_nocr(self): fn = self.root.join('samplefile') contents = fn.readlines(cr=0) assert contents == ['samplefile', ''] def test_file(self): assert self.root.join('samplefile').check(file=1) def test_not_file(self): assert not self.root.join("sampledir").check(file=1) assert self.root.join("sampledir").check(file=0) #def test_fnmatch_dir(self): def test_non_existent(self): assert self.root.join("sampledir.nothere").check(dir=0) assert self.root.join("sampledir.nothere").check(file=0) assert self.root.join("sampledir.nothere").check(notfile=1) assert self.root.join("sampledir.nothere").check(notdir=1) assert self.root.join("sampledir.nothere").check(notexists=1) assert not self.root.join("sampledir.nothere").check(notfile=0) # pattern = self.root.sep.join(['s*file']) # sfile = self.root.join("samplefile") # assert sfile.check(fnmatch=pattern) def test_size(self): url = self.root.join("samplefile") assert url.size() > len("samplefile") def test_mtime(self): url = self.root.join("samplefile") assert url.mtime() > 0 def test_relto_wrong_type(self): py.test.raises(TypeError, "self.root.relto(42)") def test_visit_filesonly(self): l = [] for i in self.root.visit(lambda x: x.check(file=1)): l.append(i.relto(self.root)) assert not "sampledir" in l assert self.root.sep.join(["sampledir", "otherfile"]) in l def test_load(self): p = self.root.join('samplepickle') obj = p.load() assert type(obj) is dict assert obj.get('answer',None) == 42 def test_visit_nodotfiles(self): l = [] for i in self.root.visit(lambda x: x.check(dotfile=0)): l.append(i.relto(self.root)) assert "sampledir" in l assert self.root.sep.join(["sampledir", "otherfile"]) in l assert not ".dotfile" in l 
def test_endswith(self): def chk(p): return p.check(endswith="pickle") assert not chk(self.root) assert not chk(self.root.join('samplefile')) assert chk(self.root.join('somepickle')) def test_copy_file(self): otherdir = self.root.join('otherdir') initpy = otherdir.join('__init__.py') copied = otherdir.join('copied') initpy.copy(copied) try: assert copied.check() s1 = initpy.read() s2 = copied.read() assert s1 == s2 finally: if copied.check(): copied.remove() def test_copy_dir(self): otherdir = self.root.join('otherdir') copied = self.root.join('newdir') try: otherdir.copy(copied) assert copied.check(dir=1) assert copied.join('__init__.py').check(file=1) s1 = otherdir.join('__init__.py').read() s2 = copied.join('__init__.py').read() assert s1 == s2 finally: if copied.check(dir=1): copied.remove(rec=1) def test_remove_file(self): d = self.root.ensure('todeleted') assert d.check() d.remove() assert not d.check() def test_remove_dir_recursive_by_default(self): d = self.root.ensure('to', 'be', 'deleted') assert d.check() p = self.root.join('to') p.remove() assert not p.check() def test_mkdir_and_remove(self): tmpdir = self.root py.test.raises(py.error.EEXIST, tmpdir.mkdir, 'sampledir') new = tmpdir.join('mktest1') new.mkdir() assert new.check(dir=1) new.remove() new = tmpdir.mkdir('mktest') assert new.check(dir=1) new.remove() assert tmpdir.join('mktest') == new def test_move_file(self): p = self.root.join('samplefile') newp = p.dirpath('moved_samplefile') p.move(newp) assert newp.check(file=1) assert not p.check() def test_move_directory(self): source = self.root.join('sampledir') dest = self.root.join('moveddir') source.move(dest) assert dest.check(dir=1) assert dest.join('otherfile').check(file=1) assert not source.join('sampledir').check() def test__getpymodule(self): obj = self.root.join('execfile')._getpymodule() assert obj.x == 42 def test_not_has_resolve(self): # because this would mean confusion with respect to # py.path.extpy assert not hasattr(self.root, 
'resolve') def test__getpymodule_a(self): otherdir = self.root.join('otherdir') mod = otherdir.join('a.py')._getpymodule() assert mod.result == "got it" def test__getpymodule_b(self): otherdir = self.root.join('otherdir') mod = otherdir.join('b.py')._getpymodule() assert mod.stuff == "got it" def test__getpymodule_c(self): otherdir = self.root.join('otherdir') mod = otherdir.join('c.py')._getpymodule() assert mod.value == "got it" def test__getpymodule_d(self): otherdir = self.root.join('otherdir') mod = otherdir.join('d.py')._getpymodule() assert mod.value2 == "got it"
# Python
from py.__.path.common import checker import py class CommonPathTests(object): root = None # subclasses have to setup a 'root' attribute def test_constructor_equality(self): p = self.root.__class__(self.root) assert p == self.root def test_eq_nonstring(self): path1 = self.root.join('sampledir') path2 = self.root.join('sampledir') assert path1 == path2 def test_new_identical(self): assert self.root == self.root.new() def test_join(self): p = self.root.join('sampledir') strp = str(p) assert strp.endswith('sampledir') assert strp.startswith(str(self.root)) def test_join_normalized(self): newpath = self.root.join(self.root.sep+'sampledir') strp = str(newpath) assert strp.endswith('sampledir') assert strp.startswith(str(self.root)) newpath = self.root.join((self.root.sep*2) + 'sampledir') strp = str(newpath) assert strp.endswith('sampledir') assert strp.startswith(str(self.root)) def test_join_noargs(self): newpath = self.root.join() assert self.root == newpath def test_add_something(self): p = self.root.join('sample') p = p + 'dir' assert p.check() def test_parts(self): newpath = self.root.join('sampledir', 'otherfile') par = newpath.parts()[-3:] assert par == [self.root, self.root.join('sampledir'), newpath] revpar = newpath.parts(reverse=True)[:3] assert revpar == [newpath, self.root.join('sampledir'), self.root] def test_common(self): other = self.root.join('sampledir') x = other.common(self.root) assert x == self.root #def test_parents_nonexisting_file(self): # newpath = self.root / 'dirnoexist' / 'nonexisting file' # par = list(newpath.parents()) # assert par[:2] == [self.root / 'dirnoexist', self.root] def test_basename_checks(self): newpath = self.root.join('sampledir') assert newpath.check(basename='sampledir') assert newpath.check(notbasename='xyz') assert newpath.basename == 'sampledir' def test_basename(self): newpath = self.root.join('sampledir') assert newpath.check(basename='sampledir') assert newpath.basename, 'sampledir' def test_dirpath(self): newpath 
= self.root.join('sampledir') assert newpath.dirpath() == self.root def test_dirpath_with_args(self): newpath = self.root.join('sampledir') assert newpath.dirpath('x') == self.root.join('x') def test_newbasename(self): newpath = self.root.join('samplefile') newbase = newpath.new(basename="samplefile2") assert newbase.basename == "samplefile2" assert newbase.dirpath() == newpath.dirpath() def test_not_exists(self): assert not self.root.join('does_not_exist').check() assert self.root.join('does_not_exist').check(exists=0) def test_exists(self): assert self.root.join("samplefile").check() assert self.root.join("samplefile").check(exists=1) def test_dir(self): #print repr(self.root.join("sampledir")) assert self.root.join("sampledir").check(dir=1) assert self.root.join('samplefile').check(notdir=1) assert not self.root.join("samplefile").check(dir=1) def test_filter_dir(self): assert checker(dir=1)(self.root.join("sampledir")) def test_fnmatch_file(self): assert self.root.join("samplefile").check(fnmatch='s*e') assert self.root.join("samplefile").check(notfnmatch='s*x') assert not self.root.join("samplefile").check(fnmatch='s*x') #def test_fnmatch_dir(self): # pattern = self.root.sep.join(['s*file']) # sfile = self.root.join("samplefile") # assert sfile.check(fnmatch=pattern) def test_relto(self): l=self.root.join("sampledir", "otherfile") assert l.relto(self.root) == l.sep.join(["sampledir", "otherfile"]) assert l.check(relto=self.root) assert self.root.check(notrelto=l) assert not self.root.check(relto=l) def test_relto_not_relative(self): l1=self.root.join("bcde") l2=self.root.join("b") assert not l1.relto(l2) assert not l2.relto(l1) def test_listdir(self): l = self.root.listdir() assert self.root.join('sampledir') in l assert self.root.join('samplefile') in l py.test.raises(py.error.ENOTDIR, "self.root.join('samplefile').listdir()") def test_listdir_fnmatchstring(self): l = self.root.listdir('s*dir') assert len(l) assert l[0], self.root.join('sampledir') def 
test_listdir_filter(self): l = self.root.listdir(checker(dir=1)) assert self.root.join('sampledir') in l assert not self.root.join('samplefile') in l def test_listdir_sorted(self): l = self.root.listdir(checker(basestarts="sample"), sort=True) assert self.root.join('sampledir') == l[0] assert self.root.join('samplefile') == l[1] assert self.root.join('samplepickle') == l[2] def test_visit_nofilter(self): l = [] for i in self.root.visit(): l.append(i.relto(self.root)) assert "sampledir" in l assert self.root.sep.join(["sampledir", "otherfile"]) in l def test_visit_norecurse(self): l = [] for i in self.root.visit(None, lambda x: x.basename != "sampledir"): l.append(i.relto(self.root)) assert "sampledir" in l assert not self.root.sep.join(["sampledir", "otherfile"]) in l def test_visit_filterfunc_is_string(self): l = [] for i in self.root.visit('*dir'): l.append(i.relto(self.root)) assert len(l), 2 assert "sampledir" in l assert "otherdir" in l def test_visit_ignore(self): p = self.root.join('nonexisting') assert list(p.visit(ignore=py.error.ENOENT)) == [] def test_visit_endswith(self): l = [] for i in self.root.visit(checker(endswith="file")): l.append(i.relto(self.root)) assert self.root.sep.join(["sampledir", "otherfile"]) in l assert "samplefile" in l def test_endswith(self): assert self.root.check(notendswith='.py') x = self.root.join('samplefile') assert x.check(endswith='file') def test_cmp(self): path1 = self.root.join('samplefile') path2 = self.root.join('samplefile2') assert cmp(path1, path2) == cmp('samplefile', 'samplefile2') assert cmp(path1, path1) == 0 def test_contains_path(self): path1 = self.root.join('samplefile') assert path1 in self.root assert not self.root.join('not existing') in self.root def test_contains_path_with_basename(self): assert 'samplefile' in self.root assert 'not_existing' not in self.root def featuretest_check_docstring(self): here = self.root.__class__ assert here.check.__doc__ doc = here.check.__doc__ for name in 
dir(local.Checkers): if name[0] != '_': assert name in doc def test_simple_read(self): x = self.root.join('samplefile').read('ru') assert x == 'samplefile\n'
# Python
#
# Python
""" module with base functionality for std.path package """ from __future__ import generators import os, sys import py def checktype(pathinstance, kw): names = ('local', 'svnwc', 'svnurl', 'py', ) for name,value in kw.items(): if name in names: cls = getattr(py.path, name) if bool(isinstance(pathinstance, cls)) ^ bool(value): return False del kw[name] return True class checker: """ deprecated: return checker callable checking for the given kwargs-specified specification. """ def __init__(self, **kwargs): py.std.warnings.warn("py.path.checker is deprecated, construct " "calls to pathobj.check() instead", DeprecationWarning, stacklevel=2) self.kwargs = kwargs def __call__(self, p): return p.check(**self.kwargs) class Checkers: _depend_on_existence = 'exists', 'link' def __init__(self, path): self.path = path def exists(self): raise NotImplementedError def basename(self, arg): return self.path.basename == arg def basestarts(self, arg): return self.path.basename.startswith(arg) def relto(self, arg): return self.path.relto(arg) def fnmatch(self, arg): return fnmatch(arg)(self.path) def endswith(self, arg): return str(self.path).endswith(arg) def _evaluate(self, kw): for name, value in kw.items(): invert = False meth = None try: meth = getattr(self, name) except AttributeError: if name[:3] == 'not': invert = True try: meth = getattr(self, name[3:]) except AttributeError: pass if meth is None: raise TypeError, "no %r checker available for %r" % (name, self.path) try: if meth.im_func.func_code.co_argcount > 1: if (not meth(value)) ^ invert: return False else: if bool(value) ^ bool(meth()) ^ invert: return False except (py.error.ENOENT, py.error.ENOTDIR): for name in self._depend_on_existence: if name in kw: if kw.get(name): return False name = 'not' + name if name in kw: if not kw.get(name): return False return True class _dummyclass: pass class PathBase(object): """ shared implementation for filesystem path objects.""" Checkers = Checkers def check(self, **kw): """ check 
a path for existence, or query its properties without arguments, this returns True if the path exists (on the filesystem), False if not with (keyword only) arguments, the object compares the value of the argument with the value of a property with the same name (if it has one, else it raises a TypeError) when for example the keyword argument 'ext' is '.py', this will return True if self.ext == '.py', False otherwise """ if kw: kw = kw.copy() if not checktype(self, kw): return False else: kw = {'exists' : 1} return self.Checkers(self)._evaluate(kw) def __iter__(self): for i in self.listdir(): yield i def __contains__(self, other): if isinstance(other, str): return self.join(other).check() else: if other.dirpath() != self: return False p = self.join(other.basename) return p.check() def basename(self): return self._getbyspec('basename')[0] basename = property(basename, None, None, 'basename part of path') def relto(self, relpath): """ return a string which is the relative part of the path to the given 'relpath'. """ if not isinstance(relpath, (str, PathBase)): raise TypeError("%r: not a string or path object" %(relpath,)) strrelpath = str(relpath) if strrelpath and strrelpath[-1] != self.sep: strrelpath += self.sep #assert strrelpath[-1] == self.sep #assert strrelpath[-2] != self.sep strself = str(self) if strself.startswith(strrelpath): return strself[len(strrelpath):] return "" def parts(self, reverse=False): """ return a root-first list of all ancestor directories plus the path itself. """ current = self l = [self] while 1: last = current current = current.dirpath() if last == current: break l.insert(0, current) if reverse: l.reverse() return l def common(self, other): """ return the common part shared with the other path or None if there is no common part. 
""" last = None for x, y in zip(self.parts(), other.parts()): if x != y: return last last = x return last def __add__(self, other): """ return new path object with 'other' added to the basename""" return self.new(basename=self.basename+str(other)) def __cmp__(self, other): """ return sort value (-1, 0, +1). """ try: return cmp(self.strpath, other.strpath) except AttributeError: return cmp(str(self), str(other)) # self.path, other.path) def __repr__(self): """ return a string representation of this path. """ return repr(str(self)) def visit(self, fil=None, rec=None, ignore=_dummyclass): """ yields all paths below the current one fil is a filter (glob pattern or callable), if not matching the path will not be yielded, defaulting to None (everything is returned) rec is a filter (glob pattern or callable) that controls whether a node is descended, defaulting to None ignore is an Exception class that is ignoredwhen calling dirlist() on any of the paths (by default, all exceptions are reported) """ if isinstance(fil, str): fil = fnmatch(fil) if rec: if isinstance(rec, str): rec = fnmatch(fil) elif not callable(rec): rec = lambda x: True reclist = [self] while reclist: current = reclist.pop(0) try: dirlist = current.listdir() except ignore: return for p in dirlist: if fil is None or fil(p): yield p if p.check(dir=1) and (rec is None or rec(p)): reclist.append(p) def _callex(self, func, *args): """ call a function and raise errno-exception if applicable. 
""" __tracebackhide__ = True try: return func(*args) except py.error.Error: raise except EnvironmentError, e: if not hasattr(e, 'errno'): raise __tracebackhide__ = False cls, value, tb = sys.exc_info() errno = e.errno try: if not isinstance(e, WindowsError): raise NameError except NameError: # we are not on Windows, or we got a proper OSError cls = py.error._geterrnoclass(errno) else: try: cls = py.error._getwinerrnoclass(errno) except KeyError: raise cls, value, tb value = cls("%s%r" % (func.__name__, args)) __tracebackhide__ = True raise cls, value def _gethashinstance(self, hashtype): if hashtype == "md5": return py.std.md5.md5() elif hashtype == "sha": return py.std.sha.sha() else: raise ValueError("unknown hash type: %r" %(hashtype,)) class fnmatch: def __init__(self, pattern): self.pattern = pattern def __call__(self, path): """return true if the basename/fullname matches the glob-'pattern'. * matches everything ? matches any single character [seq] matches any character in seq [!seq] matches any char not in seq if the pattern contains a path-separator then the full path is used for pattern matching and a '*' is prepended to the pattern. if the pattern doesn't contain a path-separator the pattern is only matched against the basename. """ pattern = self.pattern if pattern.find(path.sep) == -1: name = path.basename else: name = str(path) # path.strpath # XXX svn? pattern = '*' + path.sep + pattern from fnmatch import fnmatch return fnmatch(name, pattern) class FSCheckers(Checkers): _depend_on_existence = Checkers._depend_on_existence+('dir', 'file') def dir(self): raise NotImplementedError def file(self): raise NotImplementedError def dotfile(self): return self.path.basename.startswith('.') def ext(self, arg): if not arg.startswith('.'): arg = '.' 
+ arg return self.path.ext == arg class FSPathBase(PathBase): """ shared implementation for filesystem path objects.""" Checkers = FSCheckers def __div__(self, other): return self.join(str(other)) def dirpath(self, *args, **kwargs): """ return the directory Path of the current Path joined with any given path arguments. """ return self.new(basename='').join(*args, **kwargs) def ext(self): """ extension of the path (including the '.').""" return self._getbyspec('ext')[0] ext = property(ext, None, None, 'extension part of path') def purebasename(self): """ pure base name of the path.""" return self._getbyspec('purebasename')[0] purebasename = property(purebasename, None, None, 'basename without extension') def read(self, mode='rb'): """ read and return a bytestring from reading the path. """ if py.std.sys.version_info < (2,3): for x in 'u', 'U': if x in mode: mode = mode.replace(x, '') f = self.open(mode) try: return f.read() finally: f.close() def readlines(self, cr=1): """ read and return a list of lines from the path. if cr is False, the newline will be removed from the end of each line. """ if not cr: content = self.read('rU') return content.split('\n') else: f = self.open('rU') try: return f.readlines() finally: f.close() def load(self): """ return object unpickled from self.read() """ f = self.open('rb') try: from cPickle import load return self._callex(load, f) finally: f.close() def move(self, target): """ move this path to target. """ if target.relto(self): raise py.error.EINVAL(target, "cannot move path into a subdirectory of itself") try: self.rename(target) except py.error.EXDEV: # invalid cross-device link self.copy(target) self.remove() def _getpymodule(self): """resolve this path to a module python object. 
""" modname = str(self) modname = modname.replace('.', self.sep) try: return sys.modules[modname] except KeyError: co = self._getpycodeobj() mod = py.std.new.module(modname) mod.__file__ = PathStr(self) if self.basename == '__init__.py': mod.__path__ = [str(self.dirpath())] sys.modules[modname] = mod try: exec co in mod.__dict__ except: del sys.modules[modname] raise return mod def _getpycodeobj(self): """ read the path and compile it to a py.code.Code object. """ s = self.read('rU') # XXX str(self) should show up somewhere in the code's filename return py.code.compile(s) class PathStr(str): def __init__(self, path): global old_import_hook self.__path__ = path if old_import_hook is None: import __builtin__ old_import_hook = __builtin__.__import__ __builtin__.__import__ = custom_import_hook def relativeimport(p, name, parent=None): names = name.split('.') last_list = [False] * (len(names)-1) + [True] modules = [] for name, is_last in zip(names, last_list): if hasattr(parent, name): # shortcut if there is already the correct name # in the parent package submodule = getattr(parent, name) else: if is_last and p.new(basename=name+'.py').check(): p = p.new(basename=name+'.py') else: p = p.new(basename=name).join('__init__.py') if not p.check(): return None # not found submodule = p._getpymodule() if parent is not None: setattr(parent, name, submodule) modules.append(submodule) parent = submodule return modules # success old_import_hook = None def custom_import_hook(name, glob=None, loc=None, fromlist=None): __tracebackhide__ = False __file__ = glob and glob.get('__file__') if isinstance(__file__, PathStr): # try to perform a relative import # for cooperation with py.magic.autopath, first look in the pkgdir modules = None if hasattr(__file__.__path__, 'pkgdir'): modules = relativeimport(__file__.__path__.pkgdir, name) if not modules: modules = relativeimport(__file__.__path__, name) if modules: if fromlist: submodule = modules[-1] # innermost submodule # try to import 
submodules named in the 'fromlist' if the # 'submodule' is a package p = submodule.__file__.__path__ if p.check(basename='__init__.py'): for name in fromlist: relativeimport(p, name, parent=submodule) # failures are fine return submodule else: return modules[0] # outermost package # fall-back __tracebackhide__ = True return old_import_hook(name, glob, loc, fromlist)
Python
""" module defining a subversion path object based on the external command 'svn'. """ import os, sys, time, re, calendar import py from py import path, process from py.__.path import common from py.__.path.svn import svncommon from py.__.misc.cache import BuildcostAccessCache, AgingCache DEBUG=False class SvnCommandPath(svncommon.SvnPathBase): """ path implementation that offers access to (possibly remote) subversion repositories. """ _lsrevcache = BuildcostAccessCache(maxentries=128) _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) def __new__(cls, path, rev=None): self = object.__new__(cls) if isinstance(path, cls): rev = path.rev path = path.strpath proto, uri = path.split("://", 1) host, uripath = uri.split('/', 1) # only check for bad chars in the non-protocol parts if (svncommon._check_for_bad_chars(host, svncommon.ALLOWED_CHARS_HOST) or svncommon._check_for_bad_chars(uripath, svncommon.ALLOWED_CHARS)): raise ValueError("bad char in path %s" % (path, )) path = path.rstrip('/') self.strpath = path self.rev = rev return self def __repr__(self): if self.rev == -1: return 'svnurl(%r)' % self.strpath else: return 'svnurl(%r, %r)' % (self.strpath, self.rev) def _svn(self, cmd, *args): if self.rev is None: return self._svnwrite(cmd, *args) else: args = ['-r', self.rev] + list(args) return self._svnwrite(cmd, *args) def _svnwrite(self, cmd, *args): l = ['svn %s' % cmd] args = ['"%s"' % self._escape(item) for item in args] l.extend(args) l.append('"%s"' % self._encodedurl()) # fixing the locale because we can't otherwise parse string = svncommon.fixlocale() + " ".join(l) if DEBUG: print "execing", string try: out = process.cmdexec(string) except py.process.cmdexec.Error, e: if (e.err.find('File Exists') != -1 or e.err.find('File already exists') != -1): raise py.error.EEXIST(self) raise return out def _encodedurl(self): return self._escape(self.strpath) def open(self, mode='r'): """ return an opened file with the given mode. 
""" assert 'w' not in mode and 'a' not in mode, "XXX not implemented for svn cmdline" assert self.check(file=1) # svn cat returns an empty file otherwise def popen(cmd): return os.popen(cmd) if self.rev is None: return popen(svncommon.fixlocale() + 'svn cat "%s"' % (self._escape(self.strpath), )) else: return popen(svncommon.fixlocale() + 'svn cat -r %s "%s"' % (self.rev, self._escape(self.strpath))) def dirpath(self, *args, **kwargs): """ return the directory path of the current path joined with any given path arguments. """ l = self.strpath.split(self.sep) if len(l) < 4: raise py.error.EINVAL(self, "base is not valid") elif len(l) == 4: return self.join(*args, **kwargs) else: return self.new(basename='').join(*args, **kwargs) # modifying methods (cache must be invalidated) def mkdir(self, *args, **kwargs): """ create & return the directory joined with args. You can provide a checkin message by giving a keyword argument 'msg'""" commit_msg=kwargs.get('msg', "mkdir by py lib invocation") createpath = self.join(*args) createpath._svnwrite('mkdir', '-m', commit_msg) self._lsnorevcache.delentry(createpath.dirpath().strpath) return createpath def copy(self, target, msg='copied by py lib invocation'): """ copy path to target with checkin message msg.""" if getattr(target, 'rev', None) is not None: raise py.error.EINVAL(target, "revisions are immutable") process.cmdexec('svn copy -m "%s" "%s" "%s"' %(msg, self._escape(self), self._escape(target))) self._lsnorevcache.delentry(target.dirpath().strpath) def rename(self, target, msg="renamed by py lib invocation"): """ rename this path to target with checkin message msg. 
""" if getattr(self, 'rev', None) is not None: raise py.error.EINVAL(self, "revisions are immutable") py.process.cmdexec('svn move -m "%s" --force "%s" "%s"' %( msg, self._escape(self), self._escape(target))) self._lsnorevcache.delentry(self.dirpath().strpath) self._lsnorevcache.delentry(self.strpath) def remove(self, rec=1, msg='removed by py lib invocation'): """ remove a file or directory (or a directory tree if rec=1) with checkin message msg.""" if self.rev is not None: raise py.error.EINVAL(self, "revisions are immutable") process.cmdexec('svn rm -m "%s" "%s"' %(msg, self._escape(self))) self._lsnorevcache.delentry(self.dirpath().strpath) def ensure(self, *args, **kwargs): """ ensure that an args-joined path exists (by default as a file). If you specify a keyword argument 'dir=True' then the path is forced to be a directory path. """ if getattr(self, 'rev', None) is not None: raise py.error.EINVAL(self, "revisions are immutable") target = self.join(*args) dir = kwargs.get('dir', 0) for x in target.parts(reverse=True): if x.check(): break else: raise py.error.ENOENT(target, "has not any valid base!") if x == target: if not x.check(dir=dir): raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) return x tocreate = target.relto(x) basename = tocreate.split(self.sep, 1)[0] tempdir = py.path.local.mkdtemp() try: tempdir.ensure(tocreate, dir=dir) cmd = 'svn import -m "%s" "%s" "%s"' % ( "ensure %s" % self._escape(tocreate), self._escape(tempdir.join(basename)), x.join(basename)._encodedurl()) process.cmdexec(cmd) self._lsnorevcache.delentry(x.strpath) # !!! 
finally: tempdir.remove() return target # end of modifying methods def _propget(self, name): res = self._svn('propget', name) return res[:-1] # strip trailing newline def _proplist(self): res = self._svn('proplist') lines = res.split('\n') lines = map(str.strip, lines[1:]) return svncommon.PropListDict(self, lines) def _listdir_nameinfo(self): """ return sequence of name-info directory entries of self """ def builder(): try: res = self._svn('ls', '-v') except process.cmdexec.Error, e: if e.err.find('non-existent in that revision') != -1: raise py.error.ENOENT(self, e.err) elif e.err.find('File not found') != -1: raise py.error.ENOENT(self, e.err) elif e.err.find('not part of a repository')!=-1: raise py.error.ENOENT(self, e.err) elif e.err.find('Unable to open')!=-1: raise py.error.ENOENT(self, e.err) elif e.err.lower().find('method not allowed')!=-1: raise py.error.EACCES(self, e.err) raise py.error.Error(e.err) lines = res.split('\n') nameinfo_seq = [] for lsline in lines: if lsline: info = InfoSvnCommand(lsline) nameinfo_seq.append((info._name, info)) return nameinfo_seq if self.rev is not None: return self._lsrevcache.getorbuild((self.strpath, self.rev), builder) else: return self._lsnorevcache.getorbuild(self.strpath, builder) def log(self, rev_start=None, rev_end=1, verbose=False): """ return a list of LogEntry instances for this path. rev_start is the starting revision (defaulting to the first one). rev_end is the last revision (defaulting to HEAD). if verbose is True, then the LogEntry instances also know which files changed. 
""" assert self.check() #make it simpler for the pipe rev_start = rev_start is None and _Head or rev_start rev_end = rev_end is None and _Head or rev_end if rev_start is _Head and rev_end == 1: rev_opt = "" else: rev_opt = "-r %s:%s" % (rev_start, rev_end) verbose_opt = verbose and "-v" or "" xmlpipe = os.popen(svncommon.fixlocale() + 'svn log --xml %s %s "%s"' % (rev_opt, verbose_opt, self.strpath)) from xml.dom import minidom tree = minidom.parse(xmlpipe) result = [] for logentry in filter(None, tree.firstChild.childNodes): if logentry.nodeType == logentry.ELEMENT_NODE: result.append(LogEntry(logentry)) return result #01234567890123456789012345678901234567890123467 # 2256 hpk 165 Nov 24 17:55 __init__.py # XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! # 1312 johnny 1627 May 05 14:32 test_decorators.py # class InfoSvnCommand: # the '0?' part in the middle is an indication of whether the resource is # locked, see 'svn help ls' lspattern = re.compile( r'^ *(?P<rev>\d+) +(?P<author>\S+) +(0? *(?P<size>\d+))? ' '*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$') def __init__(self, line): # this is a typical line from 'svn ls http://...' 
#_ 1127 jum 0 Jul 13 15:28 branch/ match = self.lspattern.match(line) data = match.groupdict() self._name = data['file'] if self._name[-1] == '/': self._name = self._name[:-1] self.kind = 'dir' else: self.kind = 'file' #self.has_props = l.pop(0) == 'P' self.created_rev = int(data['rev']) self.last_author = data['author'] self.size = data['size'] and int(data['size']) or 0 self.mtime = parse_time_with_missing_year(data['date']) self.time = self.mtime * 1000000 def __eq__(self, other): return self.__dict__ == other.__dict__ #____________________________________________________ # # helper functions #____________________________________________________ def parse_time_with_missing_year(timestr): """ analyze the time part from a single line of "svn ls -v" the svn output doesn't show the year makes the 'timestr' ambigous. """ t_now = time.gmtime() tparts = timestr.split() month = time.strptime(tparts.pop(0), '%b')[1] day = time.strptime(tparts.pop(0), '%d')[2] last = tparts.pop(0) # year or hour:minute try: year = time.strptime(last, '%Y')[0] hour = minute = 0 except ValueError: hour, minute = time.strptime(last, '%H:%M')[3:5] year = t_now[0] t_result = (year, month, day, hour, minute, 0,0,0,0) if t_result > t_now: year -= 1 t_result = (year, month, day, hour, minute, 0,0,0,0) return calendar.timegm(t_result) class PathEntry: def __init__(self, ppart): self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') self.action = ppart.getAttribute('action').encode('UTF-8') if self.action == 'A': self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') if self.copyfrom_path: self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev')) class LogEntry: def __init__(self, logentry): self.rev = int(logentry.getAttribute('revision')) for lpart in filter(None, logentry.childNodes): if lpart.nodeType == lpart.ELEMENT_NODE: if lpart.nodeName == u'author': self.author = lpart.firstChild.nodeValue.encode('UTF-8') elif lpart.nodeName == u'msg': if lpart.firstChild: self.msg 
= lpart.firstChild.nodeValue.encode('UTF-8') else: self.msg = '' elif lpart.nodeName == u'date': #2003-07-29T20:05:11.598637Z timestr = lpart.firstChild.nodeValue.encode('UTF-8') self.date = svncommon.parse_apr_time(timestr) elif lpart.nodeName == u'paths': self.strpaths = [] for ppart in filter(None, lpart.childNodes): if ppart.nodeType == ppart.ELEMENT_NODE: self.strpaths.append(PathEntry(ppart)) def __repr__(self): return '<Logentry rev=%d author=%s date=%s>' % ( self.rev, self.author, self.date) _Head = "HEAD"
Python
""" module with a base subversion path object. """ import os, sys, time, re, string import py from py.__.path import common ALLOWED_CHARS = "_ -/\\=$.~+" #add characters as necessary when tested if sys.platform == "win32": ALLOWED_CHARS += ":" ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' def _getsvnversion(ver=[]): try: return ver[0] except IndexError: v = py.process.cmdexec("svn -q --version") v.strip() v = '.'.join(v.split('.')[:2]) ver.append(v) return v def _escape_helper(text): text = str(text) if py.std.sys.platform != 'win32': text = str(text).replace('$', '\\$') return text def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): for c in str(text): if c.isalnum(): continue if c in allowed_chars: continue return True return False #_______________________________________________________________ class SvnPathBase(common.FSPathBase): """ Base implementation for SvnPath implementations. """ sep = '/' def _geturl(self): return self.strpath url = property(_geturl, None, None, "url of this svn-path.") def __str__(self): """ return a string representation (including rev-number) """ return self.strpath def __hash__(self): return hash(self.strpath) def new(self, **kw): """ create a modified version of this path. A 'rev' argument indicates a new revision. the following keyword arguments modify various path parts: http://host.com/repo/path/file.ext |-----------------------| dirname |------| basename |--| purebasename |--| ext """ obj = object.__new__(self.__class__) obj.rev = kw.get('rev', self.rev) dirname, basename, purebasename, ext = self._getbyspec( "dirname,basename,purebasename,ext") if 'basename' in kw: if 'purebasename' in kw or 'ext' in kw: raise ValueError("invalid specification %r" % kw) else: pb = kw.setdefault('purebasename', purebasename) ext = kw.setdefault('ext', ext) if ext and not ext.startswith('.'): ext = '.' 
+ ext kw['basename'] = pb + ext kw.setdefault('dirname', dirname) kw.setdefault('sep', self.sep) if kw['basename']: obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw else: obj.strpath = "%(dirname)s" % kw return obj def _getbyspec(self, spec): """ get specified parts of the path. 'arg' is a string with comma separated path parts. The parts are returned in exactly the order of the specification. you may specify the following parts: http://host.com/repo/path/file.ext |-----------------------| dirname |------| basename |--| purebasename |--| ext """ res = [] parts = self.strpath.split(self.sep) for name in spec.split(','): name = name.strip() if name == 'dirname': res.append(self.sep.join(parts[:-1])) elif name == 'basename': res.append(parts[-1]) else: basename = parts[-1] i = basename.rfind('.') if i == -1: purebasename, ext = basename, '' else: purebasename, ext = basename[:i], basename[i:] if name == 'purebasename': res.append(purebasename) elif name == 'ext': res.append(ext) else: raise NameError, "Don't know part %r" % name return res def __eq__(self, other): """ return true if path and rev attributes each match """ return (str(self) == str(other) and (self.rev == other.rev or self.rev == other.rev)) def __ne__(self, other): return not self == other def join(self, *args): """ return a new Path (with the same revision) which is composed of the self Path followed by 'args' path components. """ if not args: return self args = tuple([arg.strip(self.sep) for arg in args]) parts = (self.strpath, ) + args newpath = self.__class__(self.sep.join(parts), self.rev) return newpath def propget(self, name): """ return the content of the given property. """ value = self._propget(name) return value def proplist(self): """ list all property names. """ content = self._proplist() return content def listdir(self, fil=None, sort=None): """ list directory contents, possibly filter by the given fil func and possibly sorted. 
""" if isinstance(fil, str): fil = common.fnmatch(fil) nameinfo_seq = self._listdir_nameinfo() if len(nameinfo_seq) == 1: name, info = nameinfo_seq[0] if name == self.basename and info.kind == 'file': #if not self.check(dir=1): raise py.error.ENOTDIR(self) paths = self._make_path_tuple(nameinfo_seq) if fil or sort: paths = filter(fil, paths) paths = isinstance(paths, list) and paths or list(paths) if callable(sort): paths.sort(sort) elif sort: paths.sort() return paths def info(self): """ return an Info structure with svn-provided information. """ parent = self.dirpath() nameinfo_seq = parent._listdir_nameinfo() bn = self.basename for name, info in nameinfo_seq: if name == bn: return info raise py.error.ENOENT(self) def size(self): """ Return the size of the file content of the Path. """ return self.info().size def mtime(self): """ Return the last modification time of the file. """ return self.info().mtime # shared help methods def _escape(self, cmd): return _escape_helper(cmd) def _make_path_tuple(self, nameinfo_seq): """ return a tuple of paths from a nameinfo-tuple sequence. """ #assert self.rev is not None, "revision of %s should not be None here" % self res = [] for name, info in nameinfo_seq: child = self.join(name) res.append(child) return tuple(res) def _childmaxrev(self): """ return maximum revision number of childs (or self.rev if no childs) """ rev = self.rev for name, info in self._listdir_nameinfo(): rev = max(rev, info.created_rev) return rev #def _getlatestrevision(self): # """ return latest repo-revision for this path. 
""" # url = self.strpath # path = self.__class__(url, None) # # # we need a long walk to find the root-repo and revision # while 1: # try: # rev = max(rev, path._childmaxrev()) # previous = path # path = path.dirpath() # except (IOError, process.cmdexec.Error): # break # if rev is None: # raise IOError, "could not determine newest repo revision for %s" % self # return rev class Checkers(common.FSCheckers): def dir(self): try: return self.path.info().kind == 'dir' except py.error.Error: return self._listdirworks() def _listdirworks(self): try: self.path.listdir() except py.error.ENOENT: return False else: return True def file(self): try: return self.path.info().kind == 'file' except py.error.ENOENT: return False def exists(self): try: return self.path.info() except py.error.ENOENT: return self._listdirworks() def parse_apr_time(timestr): i = timestr.rfind('.') if i == -1: raise ValueError, "could not parse %s" % timestr timestr = timestr[:i] parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S") return time.mktime(parsedtime) class PropListDict(dict): """ a Dictionary which fetches values (InfoSvnCommand instances) lazily""" def __init__(self, path, keynames): dict.__init__(self, [(x, None) for x in keynames]) self.path = path def __getitem__(self, key): value = dict.__getitem__(self, key) if value is None: value = self.path.propget(key) dict.__setitem__(self, key, value) return value def fixlocale(): if sys.platform != 'win32': return 'LC_ALL=C ' return '' # some nasty chunk of code to solve path and url conversion and quoting issues ILLEGAL_CHARS = '* | \ / : < > ? 
\t \n \x0b \x0c \r'.split(' ') if os.sep in ILLEGAL_CHARS: ILLEGAL_CHARS.remove(os.sep) ISWINDOWS = sys.platform == 'win32' _reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I) def _check_path(path): illegal = ILLEGAL_CHARS[:] sp = path.strpath if ISWINDOWS: illegal.remove(':') if not _reg_allow_disk.match(sp): raise ValueError('path may not contain a colon (:)') for char in sp: if char not in string.printable or char in illegal: raise ValueError('illegal character %r in path' % (char,)) def path_to_fspath(path, addat=True): _check_path(path) sp = path.strpath if addat and path.rev != -1: sp = '%s@%s' % (sp, path.rev) elif addat: sp = '%s@HEAD' % (sp,) return sp def url_from_path(path): fspath = path_to_fspath(path, False) quote = py.std.urllib.quote if ISWINDOWS: match = _reg_allow_disk.match(fspath) fspath = fspath.replace('\\', '/') if match.group(1): fspath = '/%s%s' % (match.group(1).replace('\\', '/'), quote(fspath[len(match.group(1)):])) else: fspath = quote(fspath) else: fspath = quote(fspath) if path.rev != -1: fspath = '%s@%s' % (fspath, path.rev) else: fspath = '%s@HEAD' % (fspath,) return 'file://%s' % (fspath,)
Python
import sys import py from py import path, test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') # make a wc directory out of a given root url # cache previously obtained wcs! # def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create "%s"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q "%s" <"%s"' % (svncommon._escape_helper(repo), repodump)) print "created svn repository", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo = '/' + str(repo).replace('\\', '/') wc.checkout(url='file://%s' % repo) print "checked out new repo into", wc else: print "using repository at", repo wc = py.path.svnwc(wcdir) return ("file://%s" % repo, wc) def save_repowc(): repo, wc = getrepowc() repo = py.path.local(repo[len("file://"):]) assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print repo print repo[len("file://"):] repo = py.path.local(repo[len("file://"):]) print repo assert repo.check() # repositories have read only files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository for testing purposes and return the url to it def make_test_repo(name="test-repository"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove() raise if sys.platform == 'win32': repo = '/' + str(repo).replace('\\', '/') return py.path.svnurl("file://%s" % repo) class 
CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name for x in 'test_remove', 'test_move': if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth): x = getattr(self, '_savedrepowc', None) if x is not None: restore_repowc(x) del self._savedrepowc def test_propget(self): url = self.root.join("samplefile") value = url.propget('svn:eol-style') assert value == 'native' def test_proplist(self): url = self.root.join("samplefile") res = url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self): url = self.root.join("samplefile") res = url.info() assert res.size > len("samplefile") and res.created_rev >= 0 def test_log_simple(self): py.test.skip("XXX: does not work at least on svn below 1.3") url = self.root.join("samplefile") logentries = url.log() for logentry in logentries: assert logentry.rev == 1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need to test more normalizing properties url = self.root.join("/") assert self.root == url #def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert newpath != self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url = self.root.new(rev=None) # assert url.rev == None # assert url.strpath == self.root.strpath # url = self.root.new(rev=10) # assert url.rev == 10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) # url = url.join("samplefile") # res = url.info() # assert res.size > len("samplefile") and res.created_rev == 1155 # the following tests are easier if we have a path class def 
test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert rev == 42 assert url == self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev == -1 assert url == self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev == -1 assert url == self.root.strpath def _test_getreporev(self): """ this test runs so slow it's usually disabled """ old = cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl finally: repositories.repos = old #cache.repositories.put(svnrepourl, 1200, 0)
Python
#
Python
""" svn-Command based Implementation of a Subversion WorkingCopy Path. SvnWCCommandPath is the main class. SvnWC is an alias to this class. """ import os, sys, time, re, calendar import py from py.__.path import common from py.__.path.svn import cache from py.__.path.svn import svncommon DEBUG = 0 rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)') class SvnWCCommandPath(common.FSPathBase): """ path implementation offering access/modification to svn working copies. It has methods similar to the functions in os.path and similar to the commands of the svn client. """ sep = os.sep def __new__(cls, wcpath=None): self = object.__new__(cls) if isinstance(wcpath, cls): if wcpath.__class__ == cls: return wcpath wcpath = wcpath.localpath if svncommon._check_for_bad_chars(str(wcpath), svncommon.ALLOWED_CHARS): raise ValueError("bad char in wcpath %s" % (wcpath, )) self.localpath = py.path.local(wcpath) return self strpath = property(lambda x: str(x.localpath), None, None, "string path") def __eq__(self, other): return self.localpath == getattr(other, 'localpath', None) def _geturl(self): if getattr(self, '_url', None) is None: info = self.info() self._url = info.url #SvnPath(info.url, info.rev) assert isinstance(self._url, str) return self._url url = property(_geturl, None, None, "url of this WC item") def _escape(self, cmd): return svncommon._escape_helper(cmd) def dump(self, obj): """ pickle object into path location""" return self.localpath.dump(obj) def svnurl(self): """ return current SvnPath for this WC-item. 
""" info = self.info() return py.path.svnurl(info.url) def __repr__(self): return "svnwc(%r)" % (self.strpath) # , self._url) def __str__(self): return str(self.localpath) def _svn(self, cmd, *args): l = ['svn %s' % cmd] args = [self._escape(item) for item in args] l.extend(args) l.append('"%s"' % self._escape(self.strpath)) # try fixing the locale because we can't otherwise parse string = svncommon.fixlocale() + " ".join(l) if DEBUG: print "execing", string try: try: key = 'LC_MESSAGES' hold = os.environ.get(key) os.environ[key] = 'C' out = py.process.cmdexec(string) finally: if hold: os.environ[key] = hold else: del os.environ[key] except py.process.cmdexec.Error, e: strerr = e.err.lower() if strerr.find('file not found') != -1: raise py.error.ENOENT(self) if (strerr.find('file exists') != -1 or strerr.find('file already exists') != -1 or strerr.find("can't create directory") != -1): raise py.error.EEXIST(self) raise return out def switch(self, url): """ switch to given URL. """ self._svn('switch', url) def checkout(self, url=None, rev=None): """ checkout from url to local wcpath. """ args = [] if url is None: url = self.url if rev is None or rev == -1: if (py.std.sys.platform != 'win32' and svncommon._getsvnversion() == '1.3'): url += "@HEAD" else: if svncommon._getsvnversion() == '1.3': url += "@%d" % rev else: args.append('-r', str(rev)) self._svn('co', url, *args) def update(self, rev = 'HEAD'): """ update working copy item to given revision. (None -> HEAD). """ self._svn('up -r %s' % rev) def write(self, content, mode='wb'): """ write content into local filesystem wc. """ self.localpath.write(content, mode) def dirpath(self, *args): """ return the directory Path of the current Path. 
""" return self.__class__(self.localpath.dirpath(*args)) def _ensuredirs(self): parent = self.dirpath() if parent.check(dir=0): parent._ensuredirs() if self.check(dir=0): self.mkdir() return self def ensure(self, *args, **kwargs): """ ensure that an args-joined path exists (by default as a file). if you specify a keyword argument 'directory=True' then the path is forced to be a directory path. """ try: p = self.join(*args) if p.check(): if p.check(versioned=False): p.add() return p if kwargs.get('dir', 0): return p._ensuredirs() parent = p.dirpath() parent._ensuredirs() p.write("") p.add() return p except: error_enhance(sys.exc_info()) def mkdir(self, *args): """ create & return the directory joined with args. """ if args: return self.join(*args).mkdir() else: self._svn('mkdir') return self def add(self): """ add ourself to svn """ self._svn('add') def remove(self, rec=1, force=1): """ remove a file or a directory tree. 'rec'ursive is ignored and considered always true (because of underlying svn semantics. """ assert rec, "svn cannot remove non-recursively" flags = [] if force: flags.append('--force') self._svn('remove', *flags) def copy(self, target): """ copy path to target.""" py.process.cmdexec("svn copy %s %s" %(str(self), str(target))) def rename(self, target): """ rename this path to target. """ py.process.cmdexec("svn move --force %s %s" %(str(self), str(target))) _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(\S+)\s+(.*)') def status(self, updates=0, rec=0, externals=0): """ return (collective) Status object for this file. 
""" # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1 # 2201 2192 jum test # XXX if externals: raise ValueError("XXX cannot perform status() " "on external items yet") else: #1.2 supports: externals = '--ignore-externals' externals = '' if rec: rec= '' else: rec = '--non-recursive' # XXX does not work on all subversion versions #if not externals: # externals = '--ignore-externals' if updates: updates = '-u' else: updates = '' update_rev = None out = self._svn('status -v %s %s %s' % (updates, rec, externals)) rootstatus = WCStatus(self) for line in out.split('\n'): if not line.strip(): continue #print "processing %r" % line flags, rest = line[:8], line[8:] # first column c0,c1,c2,c3,c4,x5,x6,c7 = flags #if '*' in line: # print "flags", repr(flags), "rest", repr(rest) if c0 in '?XI': fn = line.split(None, 1)[1] if c0 == '?': wcpath = self.join(fn, abs=1) rootstatus.unknown.append(wcpath) elif c0 == 'X': wcpath = self.__class__(self.localpath.join(fn, abs=1)) rootstatus.external.append(wcpath) elif c0 == 'I': wcpath = self.join(fn, abs=1) rootstatus.ignored.append(wcpath) continue #elif c0 in '~!' 
or c4 == 'S': # raise NotImplementedError("received flag %r" % c0) m = self._rex_status.match(rest) if not m: if c7 == '*': fn = rest.strip() wcpath = self.join(fn, abs=1) rootstatus.update_available.append(wcpath) continue if line.lower().find('against revision:')!=-1: update_rev = int(rest.split(':')[1].strip()) continue # keep trying raise ValueError, "could not parse line %r" % line else: rev, modrev, author, fn = m.groups() wcpath = self.join(fn, abs=1) #assert wcpath.check() if c0 == 'M': assert wcpath.check(file=1), "didn't expect a directory with changed content here" rootstatus.modified.append(wcpath) elif c0 == 'A' or c3 == '+' : rootstatus.added.append(wcpath) elif c0 == 'D': rootstatus.deleted.append(wcpath) elif c0 == 'C': rootstatus.conflict.append(wcpath) elif c0 == '~': rootstatus.kindmismatch.append(wcpath) elif c0 == '!': rootstatus.incomplete.append(wcpath) elif not c0.strip(): rootstatus.unchanged.append(wcpath) else: raise NotImplementedError("received flag %r" % c0) if c1 == 'M': rootstatus.prop_modified.append(wcpath) if c2 == 'L': rootstatus.locked.append(wcpath) if c7 == '*': rootstatus.update_available.append(wcpath) if wcpath == self: rootstatus.rev = rev rootstatus.modrev = modrev rootstatus.author = author if update_rev: rootstatus.update_rev = update_rev continue return rootstatus def diff(self, rev=None): """ return a diff of the current path against revision rev (defaulting to the last one). 
""" if rev is None: out = self._svn('diff') else: out = self._svn('diff -r %d' % rev) return out def blame(self): """ return a list of tuples of three elements: (revision, commiter, line)""" out = self._svn('blame') result = [] blamelines = out.splitlines() reallines = py.path.svnurl(self.url).readlines() for i, (blameline, line) in py.builtin.enumerate( zip(blamelines, reallines)): m = rex_blame.match(blameline) if not m: raise ValueError("output line %r of svn blame does not match " "expected format" % (line, )) rev, name, _ = m.groups() result.append((int(rev), name, line)) return result _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL) def commit(self, message=""): """commit() returns None if there was nothing to commit and the revision number of the commit otherwise. """ out = self._svn('commit -m "%s"' % message) try: del cache.info[self] except KeyError: pass if out: m = self._rex_commit.match(out) return int(m.group(1)) def propset(self, name, value, *args): """ set property name to value on this path. """ d = py.path.local.mkdtemp() try: p = d.join('value') p.write(value) self._svn('propset', name, '--file', str(p), *args) finally: d.remove() def propget(self, name): """ get property name on this path. """ res = self._svn('propget', name) return res[:-1] # strip trailing newline def propdel(self, name): """ delete property name on this path. """ res = self._svn('propdel', name) return res[:-1] # strip trailing newline def proplist(self, rec=0): """ return a mapping of property names to property values. If rec is True, then return a dictionary mapping sub-paths to such mappings. """ if rec: res = self._svn('proplist -R') return make_recursive_propdict(self, res) else: res = self._svn('proplist') lines = res.split('\n') lines = map(str.strip, lines[1:]) return svncommon.PropListDict(self, lines) def revert(self, rec=0): """ revert the local changes of this path. if rec is True, do so recursively. 
""" if rec: result = self._svn('revert -R') else: result = self._svn('revert') return result def new(self, **kw): """ create a modified version of this path. A 'rev' argument indicates a new revision. the following keyword arguments modify various path parts: http://host.com/repo/path/file.ext |-----------------------| dirname |------| basename |--| purebasename |--| ext """ if kw: localpath = self.localpath.new(**kw) else: localpath = self.localpath return self.__class__(localpath) def join(self, *args, **kwargs): """ return a new Path (with the same revision) which is composed of the self Path followed by 'args' path components. """ if not args: return self localpath = self.localpath.join(*args, **kwargs) return self.__class__(localpath) def info(self, usecache=1): """ return an Info structure with svn-provided information. """ info = usecache and cache.info.get(self) if not info: try: output = self._svn('info') except py.process.cmdexec.Error, e: if e.err.find('Path is not a working copy directory') != -1: raise py.error.ENOENT(self, e.err) raise # XXX SVN 1.3 has output on stderr instead of stdout (while it does # return 0!), so a bit nasty, but we assume no output is output # to stderr... if (output.strip() == '' or output.lower().find('not a versioned resource') != -1): raise py.error.ENOENT(self, output) info = InfoSvnWCCommand(output) # Can't reliably compare on Windows without access to win32api if py.std.sys.platform != 'win32': if info.path != self.localpath: raise py.error.ENOENT(self, "not a versioned resource:" + " %s != %s" % (info.path, self.localpath)) cache.info[self] = info self.rev = info.rev return info def listdir(self, fil=None, sort=None): """ return a sequence of Paths. listdir will return either a tuple or a list of paths depending on implementation choices. 
""" if isinstance(fil, str): fil = common.fnmatch(fil) # XXX unify argument naming with LocalPath.listdir def notsvn(path): return path.basename != '.svn' paths = [] for localpath in self.localpath.listdir(notsvn): p = self.__class__(localpath) paths.append(p) if fil or sort: paths = filter(fil, paths) paths = isinstance(paths, list) and paths or list(paths) if callable(sort): paths.sort(sort) elif sort: paths.sort() return paths def open(self, mode='r'): """ return an opened file with the given mode. """ return open(self.strpath, mode) def _getbyspec(self, spec): return self.localpath._getbyspec(spec) class Checkers(py.path.local.Checkers): def __init__(self, path): self.svnwcpath = path self.path = path.localpath def versioned(self): try: s = self.svnwcpath.info() except (py.error.ENOENT, py.error.EEXIST): return False except py.process.cmdexec.Error, e: if e.err.find('is not a working copy')!=-1: return False raise else: return True def log(self, rev_start=None, rev_end=1, verbose=False): """ return a list of LogEntry instances for this path. rev_start is the starting revision (defaulting to the first one). rev_end is the last revision (defaulting to HEAD). if verbose is True, then the LogEntry instances also know which files changed. """ from py.__.path.svn.urlcommand import _Head, LogEntry assert self.check() # make it simpler for the pipe rev_start = rev_start is None and _Head or rev_start rev_end = rev_end is None and _Head or rev_end if rev_start is _Head and rev_end == 1: rev_opt = "" else: rev_opt = "-r %s:%s" % (rev_start, rev_end) verbose_opt = verbose and "-v" or "" s = svncommon.fixlocale() # some blather on stderr stdin, stdout, stderr = os.popen3(s + 'svn log --xml %s %s "%s"' % ( rev_opt, verbose_opt, self.strpath)) from xml.dom import minidom from xml.parsers.expat import ExpatError try: tree = minidom.parse(stdout) except ExpatError: # XXX not entirely sure about this exception... shouldn't it be # some py.error.* something? 
raise ValueError('no such revision') result = [] for logentry in filter(None, tree.firstChild.childNodes): if logentry.nodeType == logentry.ELEMENT_NODE: result.append(LogEntry(logentry)) return result def size(self): """ Return the size of the file content of the Path. """ return self.info().size def mtime(self): """ Return the last modification time of the file. """ return self.info().mtime def __hash__(self): return hash((self.strpath, self.__class__)) class WCStatus: attrnames = ('modified','added', 'conflict', 'unchanged', 'external', 'deleted', 'prop_modified', 'unknown', 'update_available', 'incomplete', 'kindmismatch', 'ignored' ) def __init__(self, wcpath, rev=None, modrev=None, author=None): self.wcpath = wcpath self.rev = rev self.modrev = modrev self.author = author for name in self.attrnames: setattr(self, name, []) def allpath(self, sort=True, **kw): d = {} for name in self.attrnames: if name not in kw or kw[name]: for path in getattr(self, name): d[path] = 1 l = d.keys() if sort: l.sort() return l class InfoSvnWCCommand: def __init__(self, output): # Path: test # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada # Revision: 2151 # Node Kind: directory # Schedule: normal # Last Changed Author: hpk # Last Changed Rev: 2100 # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003) d = {} for line in output.split('\n'): if not line.strip(): continue key, value = line.split(':', 1) key = key.lower().replace(' ', '') value = value.strip() d[key] = value try: self.url = d['url'] except KeyError: raise ValueError, "Not a versioned resource" #raise ValueError, "Not a versioned resource %r" % path self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] self.rev = int(d['revision']) self.path = py.path.local(d['path']) self.size = self.path.size() if 'lastchangedrev' in d: self.created_rev = 
int(d['lastchangedrev']) if 'lastchangedauthor' in d: self.last_author = d['lastchangedauthor'] if 'lastchangeddate' in d: self.mtime = parse_wcinfotime(d['lastchangeddate']) self.time = self.mtime * 1000000 def __eq__(self, other): return self.__dict__ == other.__dict__ def parse_wcinfotime(timestr): """ Returns seconds since epoch, UTC. """ # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr) if not m: raise ValueError, "timestring %r does not match" % timestr timestr, timezone = m.groups() # do not handle timezone specially, return value should be UTC parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S") return calendar.timegm(parsedtime) def make_recursive_propdict(wcroot, output, rex = re.compile("Properties on '(.*)':")): """ Return a dictionary of path->PropListDict mappings. """ lines = filter(None, output.split('\n')) pdict = {} while lines: line = lines.pop(0) m = rex.match(line) if not m: raise ValueError, "could not parse propget-line: %r" % line path = m.groups()[0] wcpath = wcroot.join(path, abs=1) propnames = [] while lines and lines[0].startswith(' '): propname = lines.pop(0).strip() propnames.append(propname) assert propnames, "must have found properties!" pdict[wcpath] = svncommon.PropListDict(wcpath, propnames) return pdict def error_enhance((cls, error, tb)): raise cls, error, tb
Python
#
Python
"""
generic cache mechanism for subversion-related structures

XXX make mt-safe
"""
import time

# simple per-process memo dictionaries, filled in by the svn path
# implementations elsewhere in the package
proplist = {}
info = {}
entries = {}
prop = {}

#-----------------------------------------------------------
# Caching latest repository revision and repo-paths
# (getting them is slow with the current implementations)
#
# XXX make mt-safe
#-----------------------------------------------------------

class RepoEntry:
    """A discovered repository root URL together with the last
    revision observed for it and when that observation was made.
    """
    def __init__(self, url, rev, timestamp):
        self.url = url
        self.rev = rev
        self.timestamp = timestamp

    def __str__(self):
        return "repo: %s;%s %s" % (self.url, self.rev, self.timestamp)


class RepoCache:
    """The Repocache manages discovered repository paths and their
    revisions.  Within the timeout window it will also answer with
    the cached revision of the repository root.
    """
    # seconds after which we forget that we know the last revision
    timeout = 20

    def __init__(self):
        self.repos = []

    def clear(self):
        """drop all cached repository entries."""
        self.repos = []

    def put(self, url, rev, timestamp=None):
        """record (or refresh) the revision known for *url*.

        A rev of None is ignored; a missing timestamp means "now".
        """
        if rev is None:
            return
        if timestamp is None:
            timestamp = time.time()
        known = None
        for candidate in self.repos:
            if candidate.url == url:
                known = candidate
                break
        if known is not None:
            # refresh the existing entry in place
            known.timestamp = timestamp
            known.rev = rev
        else:
            self.repos.append(RepoEntry(url, rev, timestamp))

    def get(self, url):
        """return (repo_root_url, rev) for *url*.

        The first cached root that prefixes *url* wins; its revision
        is only reported while the entry is younger than `timeout`,
        otherwise -1 stands in for "unknown".  An unseen url yields
        (url, -1).
        """
        now = time.time()
        matches = [entry for entry in self.repos
                   if url.startswith(entry.url)]
        if not matches:
            return url, -1
        entry = matches[0]
        if now < entry.timestamp + self.timeout:
            return entry.url, entry.rev
        return entry.url, -1


repositories = RepoCache()
Python
""" unified file system api """
Python
import py, itertools
from py.__.path import common

# local ids are allocated with ~COUNTER.next(), i.e. negative numbers,
# so they can never collide with the (non-negative) ids handed out by
# the server side (see PathServer.p2c).
# NOTE(review): .next() is Python-2-only iterator protocol.
COUNTER = itertools.count()

class RemotePath(common.FSPathBase):
    """Client-side proxy for a path object living in another process.

    Every operation is shipped as a (COMMAND, id, ...) tuple over
    `channel` and, where a reply is expected, the answer is taken
    from the next channel.receive().  `id` names the remote path
    object inside the server's id->path table.
    """
    sep = '/'

    def __init__(self, channel, id, basename=None):
        self._channel = channel
        self._id = id
        self._basename = basename
        self._specs = {}   # cache of already-fetched 'get' spec values

    def __del__(self):
        # tell the server it may forget the remote path object
        self._channel.send(('DEL', self._id))

    def __repr__(self):
        return 'RemotePath(%s)' % self.basename

    def listdir(self, *args):
        """list remote directory entries as new RemotePath proxies."""
        self._channel.send(('LIST', self._id) + args)
        return [RemotePath(self._channel, id, basename)
                for (id, basename) in self._channel.receive()]

    def dirpath(self):
        """proxy for the remote parent directory.

        The result id is allocated locally (negative) and sent along,
        so no round-trip reply is needed.
        """
        id = ~COUNTER.next()
        self._channel.send(('DIRPATH', self._id, id))
        return RemotePath(self._channel, id)

    def join(self, *args):
        """proxy for the remote path joined with *args* components."""
        id = ~COUNTER.next()
        self._channel.send(('JOIN', self._id, id) + args)
        return RemotePath(self._channel, id)

    def get(self, spec):
        """return the values for a comma-separated *spec* string,
        asking the server only for parts not cached in self._specs.
        """
        parts = spec.split(',')
        ask = [x for x in parts if x not in self._specs]
        if ask:
            self._channel.send(('GET', self._id, ",".join(ask)))
            # server replies with values in the same order as asked
            for part, value in zip(ask, self._channel.receive()):
                self._specs[part] = value
        return [self._specs[x] for x in parts]

    def read(self):
        """return the remote file's content (one round-trip)."""
        self._channel.send(('READ', self._id))
        return self._channel.receive()
Python
import threading

class PathServer:
    """Serve path operations to a RemotePath proxy in another process.

    A background thread reads (COMMAND, args...) tuples from *channel*
    and dispatches each to the matching command_* method.  Served path
    objects are referenced by integer ids kept in self.C2P; ids handed
    out here are non-negative, while the client allocates negative ids
    for results it pre-registers (JOIN/DIRPATH).
    """

    def __init__(self, channel):
        self.channel = channel
        self.C2P = {}        # id -> served path object
        self.next_id = 0
        threading.Thread(target=self.serve).start()

    def p2c(self, path):
        """register *path* and return the integer id the client uses."""
        ident = self.next_id
        self.next_id = ident + 1
        self.C2P[ident] = path
        return ident

    def command_LIST(self, id, *args):
        # reply with (id, basename) pairs for every directory entry
        listing = []
        for child in self.C2P[id].listdir(*args):
            listing.append((self.p2c(child), child.basename))
        self.channel.send(listing)

    def command_DEL(self, id):
        # the client dropped its proxy; release our reference
        del self.C2P[id]

    def command_GET(self, id, spec):
        self.channel.send(self.C2P[id].get(spec))

    def command_READ(self, id):
        self.channel.send(self.C2P[id].read())

    def command_JOIN(self, id, resultid, *args):
        # resultid is pre-allocated by the client and must be fresh
        assert resultid not in self.C2P
        self.C2P[resultid] = self.C2P[id].join(*args)

    def command_DIRPATH(self, id, resultid):
        assert resultid not in self.C2P
        self.C2P[resultid] = self.C2P[id].dirpath()

    def serve(self):
        """dispatch loop; terminates when the channel signals EOF."""
        try:
            while True:
                message = self.channel.receive()
                handler = getattr(self, 'command_' + message[0])
                handler(*message[1:])
        except EOFError:
            pass

if __name__ == '__main__':
    # demo: serve the local /tmp directory to a freshly spawned
    # subprocess which lists it through a RemotePath proxy
    import py
    gw = py.execnet.PopenGateway()
    channel = gw.channelfactory.new()
    srv = PathServer(channel)
    c = gw.remote_exec("""
import remotepath
p = remotepath.RemotePath(channel.receive(), channel.receive())
channel.send(len(p.listdir()))
""")
    c.send(channel)
    c.send(srv.p2c(py.path.local('/tmp')))
    # parenthesized so the module also parses under Python 3
    # (identical output under Python 2)
    print(c.receive())
Python
#
Python
import py
from remotepath import RemotePath

# Demo script: ship the PathServer implementation (channeltest.py)
# into a subprocess and drive it from here through a RemotePath proxy.

# server-side source: the whole channeltest.py module plus a small
# bootstrap that registers py.path.local("/tmp") and sends its id back
SRC = open('channeltest.py', 'r').read()

SRC += '''
import py
srv = PathServer(channel.receive())
channel.send(srv.p2c(py.path.local("/tmp")))
'''

#gw = py.execnet.SshGateway('codespeak.net')
gw = py.execnet.PopenGateway()
c = gw.remote_exec(SRC)
# a dedicated subchannel carries the path protocol traffic
subchannel = gw.channelfactory.new()
c.send(subchannel)

# the id received from the remote bootstrap names /tmp on the far side
p = RemotePath(subchannel, c.receive())
Python
""" package initialization. You use the functionality of this package by putting from py.initpkg import initpkg initpkg(__name__, exportdefs={ 'name1.name2' : ('./path/to/file.py', 'name') ... }) into your package's __init__.py file. This will lead your package to only expose the names of all your implementation files that you explicitely specify. In the above example 'name1' will become a Module instance where 'name2' is bound in its namespace to the 'name' object in the relative './path/to/file.py' python module. Note that you can also use a '.c' file in which case it will be compiled via distutils-facilities on the fly. """ from __future__ import generators import sys import os assert sys.version_info >= (2,2,0), "py lib requires python 2.2 or higher" from types import ModuleType # --------------------------------------------------- # Package Object # --------------------------------------------------- class Package(object): def __init__(self, name, exportdefs): pkgmodule = sys.modules[name] assert pkgmodule.__name__ == name self.name = name self.exportdefs = exportdefs self.module = pkgmodule assert not hasattr(pkgmodule, '__package__'), \ "unsupported reinitialization of %r" % pkgmodule pkgmodule.__package__ = self # make available pkgname.__ implname = name + '.' + '__' self.implmodule = ModuleType(implname) self.implmodule.__name__ = implname self.implmodule.__file__ = os.path.abspath(pkgmodule.__file__) self.implmodule.__path__ = [os.path.abspath(p) for p in pkgmodule.__path__] pkgmodule.__ = self.implmodule setmodule(implname, self.implmodule) # inhibit further direct filesystem imports through the package module del pkgmodule.__path__ def _resolve(self, extpyish): """ resolve a combined filesystem/python extpy-ish path. 
""" fspath, modpath = extpyish if not fspath.endswith('.py'): import py e = py.path.local(self.implmodule.__file__) e = e.dirpath(fspath, abs=True) e = py.path.extpy(e, modpath) return e.resolve() assert fspath.startswith('./'), \ "%r is not an implementation path (XXX)" % (extpyish,) implmodule = self._loadimpl(fspath[:-3]) if not isinstance(modpath, str): # export the entire module return implmodule current = implmodule for x in modpath.split('.'): try: current = getattr(current, x) except AttributeError: raise AttributeError("resolving %r failed: %s" %( extpyish, x)) return current def getimportname(self, path): if not path.ext.startswith('.py'): return None import py base = py.path.local(self.implmodule.__file__).dirpath() if not path.relto(base): return None names = path.new(ext='').relto(base).split(path.sep) dottedname = ".".join([self.implmodule.__name__] + names) return dottedname def _loadimpl(self, relfile): """ load implementation for the given relfile. """ parts = [x.strip() for x in relfile.split('/') if x and x!= '.'] modpath = ".".join([self.implmodule.__name__] + parts) #print "trying import", modpath return __import__(modpath, None, None, ['__doc__']) def exportitems(self): return self.exportdefs.items() def getpath(self): from py.path import local base = local(self.implmodule.__file__).dirpath() assert base.check() return base def _iterfiles(self): from py.__.path.common import checker base = self.getpath() for x in base.visit(checker(file=1, notext='.pyc'), rec=checker(dotfile=0)): yield x def shahexdigest(self, cache=[]): """ return sha hexdigest for files contained in package. 
""" if cache: return cache[0] from sha import sha sum = sha() # XXX the checksum depends on the order in which visit() enumerates # the files, and it doesn't depend on the file names and paths for x in self._iterfiles(): sum.update(x.read()) cache.append(sum.hexdigest()) return cache[0] def getzipdata(self): """ return string representing a zipfile containing the package. """ import zipfile import py try: from cStringIO import StringIO except ImportError: from StringIO import StringIO base = py.__package__.getpath().dirpath() outf = StringIO() f = zipfile.ZipFile(outf, 'w', compression=zipfile.ZIP_DEFLATED) try: for x in self._iterfiles(): f.write(str(x), x.relto(base)) finally: f.close() return outf.getvalue() def getrev(self): import py p = py.path.svnwc(self.module.__file__).dirpath() try: return p.info().rev except (KeyboardInterrupt, MemoryError, SystemExit): raise except: return 'unknown' def setmodule(modpath, module): #print "sys.modules[%r] = %r" % (modpath, module) sys.modules[modpath] = module # --------------------------------------------------- # Virtual Module Object # --------------------------------------------------- class Module(ModuleType): def __init__(self, pkg, name): self.__package__ = pkg self.__name__ = name self.__map__ = {} def __getattr__(self, name): if '*' in self.__map__: extpy = self.__map__['*'][0], name result = self.__package__._resolve(extpy) else: try: extpy = self.__map__[name] except KeyError: __tracebackhide__ = True raise AttributeError(name) else: result = self.__package__._resolve(extpy) del self.__map__[name] setattr(self, name, result) #self._fixinspection(result, name) return result def _deprecated_fixinspection(self, result, name): # modify some attrs to make a class appear at export level if hasattr(result, '__module__'): if not result.__module__.startswith('py.__'): return # don't change __module__ nor __name__ for classes # that the py lib re-exports from somewhere else, # e.g. 
py.builtin.BaseException try: setattr(result, '__module__', self.__name__) except (AttributeError, TypeError): pass if hasattr(result, '__bases__'): try: setattr(result, '__name__', name) except (AttributeError, TypeError): pass def __repr__(self): return '<Module %r>' % (self.__name__, ) def getdict(self): # force all the content of the module to be loaded when __dict__ is read dictdescr = ModuleType.__dict__['__dict__'] dict = dictdescr.__get__(self) if dict is not None: if '*' not in self.__map__: for name in self.__map__.keys(): hasattr(self, name) # force attribute to be loaded, ignore errors assert not self.__map__, "%r not empty" % self.__map__ else: fsname = self.__map__['*'][0] dict.update(self.__package__._loadimpl(fsname[:-3]).__dict__) return dict __dict__ = property(getdict) del getdict # --------------------------------------------------- # Bootstrap Virtual Module Hierarchy # --------------------------------------------------- def initpkg(pkgname, exportdefs, **kw): #print "initializing package", pkgname # bootstrap Package object pkg = Package(pkgname, exportdefs) for name, value in kw.items(): setattr(pkg, name, value) seen = { pkgname : pkg.module } deferred_imports = [] for pypath, extpy in pkg.exportitems(): pyparts = pypath.split('.') modparts = pyparts[:] if extpy[1] != '*': lastmodpart = modparts.pop() else: lastmodpart = '*' current = pkgname # ensure modules for name in modparts: previous = current current += '.' + name if current not in seen: seen[current] = mod = Module(pkg, current) setattr(seen[previous], name, mod) setmodule(current, mod) mod = seen[current] if not hasattr(mod, '__map__'): assert mod is pkg.module, \ "only root modules are allowed to be non-lazy. " deferred_imports.append((mod, pyparts[-1], extpy)) else: if extpy[1] == '__doc__': mod.__doc__ = pkg._resolve(extpy) else: mod.__map__[lastmodpart] = extpy for mod, pypart, extpy in deferred_imports: setattr(mod, pypart, pkg._resolve(extpy))
Python
#!/usr/bin/env python import sys, os, os.path progpath = sys.argv[0] packagedir = os.path.abspath(os.path.dirname(progpath)) packagename = os.path.basename(packagedir) bindir = os.path.join(packagedir, 'bin') if sys.platform == 'win32': bindir = os.path.join(bindir, 'win32') rootdir = os.path.dirname(packagedir) def prepend_path(name, value): sep = os.path.pathsep curpath = os.environ.get(name, '') newpath = [value] + [ x for x in curpath.split(sep) if x and x != value ] return setenv(name, sep.join(newpath)) def setenv(name, value): shell = os.environ.get('SHELL', '') comspec = os.environ.get('COMSPEC', '') if shell.endswith('csh'): cmd = 'setenv %s "%s"' % (name, value) elif shell.endswith('sh'): cmd = '%s="%s"; export %s' % (name, value, name) elif comspec.endswith('cmd.exe'): cmd = 'set %s=%s' % (name, value) else: assert False, 'Shell not supported.' return cmd print prepend_path('PATH', bindir) print prepend_path('PYTHONPATH', rootdir)
Python
#
Python
""" high-level sub-process handling """
Python
"""
module defining basic hook for executing commands
in a - as much as possible - platform independent way.

Current list:

    exec_cmd(cmd)  executes the given command and returns output
                   or ExecutionFailed exception (if exit status != 0)

NOTE(review): this module is Python-2-only (popen2/os.popen3,
``except E, e`` syntax, ``raise C, msg``).
"""
import os, sys
import py

#-----------------------------------------------------------
# posix external command execution
#-----------------------------------------------------------
def posix_exec_cmd(cmd):
    """ return output of executing 'cmd'.

    raise ExecutionFailed exception if the command failed.
    the exception will provide an 'err' attribute containing
    the error-output from the command.
    """
    __tracebackhide__ = True
    import popen2
    import errno
    #print "execing", cmd
    # Popen3(cmd, 1): capture stderr on a separate pipe as well
    child = popen2.Popen3(cmd, 1)
    stdin, stdout, stderr = child.tochild, child.fromchild, child.childerr
    stdin.close()

    # XXX sometimes we get a blocked r.read() call (see below)
    #     although select told us there is something to read.
    #     only the next three lines appear to prevent
    #     the read call from blocking infinitely.
    import fcntl
    def set_non_block(fd):
        # switch the fd to non-blocking mode, preserving other flags
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        flags = flags | os.O_NONBLOCK
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
    set_non_block(stdout.fileno())
    set_non_block(stderr.fileno())
    #fcntl.fcntl(stdout, fcntl.F_SETFL, os.O_NONBLOCK)
    #fcntl.fcntl(stderr, fcntl.F_SETFL, os.O_NONBLOCK)

    import select
    out, err = [], []
    # drain stdout and stderr concurrently until both pipes close,
    # so a child filling one pipe cannot deadlock us on the other
    while 1:
        r_list = filter(lambda x: x and not x.closed, [stdout, stderr])
        if not r_list:
            break
        try:
            r_list = select.select(r_list, [], [])[0]
        except (select.error, IOError), se:
            # interrupted system call: just retry the select
            if se.args[0] == errno.EINTR:
                continue
            else:
                raise
        for r in r_list:
            try:
                data = r.read()   # XXX see XXX above
            except IOError, io:
                if io.args[0] == errno.EAGAIN:
                    continue
                # Connection Lost
                raise
            except OSError, ose:
                if ose.errno == errno.EPIPE:
                    # Connection Lost
                    raise
                if ose.errno == errno.EAGAIN: # MacOS-X does this
                    continue
                raise
            if not data:
                # EOF on this pipe
                r.close()
                continue
            if r is stdout:
                out.append(data)
            else:
                err.append(data)
    pid, systemstatus = os.waitpid(child.pid, 0)
    if pid != child.pid:
        raise ExecutionFailed, "child process disappeared during: "+ cmd
    if systemstatus:
        # signal deaths are reported as 128+signum, like most shells do
        if os.WIFSIGNALED(systemstatus):
            status = os.WTERMSIG(systemstatus) + 128
        else:
            status = os.WEXITSTATUS(systemstatus)
        raise ExecutionFailed(status, systemstatus, cmd,
                              ''.join(out), ''.join(err))
    return "".join(out)

#-----------------------------------------------------------
# simple win32 external command execution
#-----------------------------------------------------------
def win32_exec_cmd(cmd):
    """ return output of executing 'cmd'.

    raise ExecutionFailed exception if the command failed.
    the exception will provide an 'err' attribute containing
    the error-output from the command.

    Note that this method can currently deadlock because
    we don't have WaitForMultipleObjects in the std-python api.

    Further note that the rules for quoting are very special
    under Windows. Do a HELP CMD in a shell, and tell me
    if you understand this. For now, I try to do a fix.
    """
    #print "*****", cmd
    # the following quoting is only valid for CMD.EXE, not COMMAND.COM
    cmd_quoting = True
    try:
        if os.environ['COMSPEC'].upper().endswith('COMMAND.COM'):
            cmd_quoting = False
    except KeyError:
        pass
    if cmd_quoting:
        # CMD.EXE strips one outer pair of quotes; add it back when the
        # command contains embedded quotes but is not already wrapped
        if '"' in cmd and not cmd.startswith('""'):
            cmd = '"%s"' % cmd
    return popen3_exec_cmd(cmd)

def popen3_exec_cmd(cmd):
    """run *cmd* via os.popen3; return stdout, raise on exit status."""
    stdin, stdout, stderr = os.popen3(cmd)
    out = stdout.read()
    err = stderr.read()
    stdout.close()
    stderr.close()
    # closing the write end yields the child's exit status (or None)
    status = stdin.close()
    if status:
        raise ExecutionFailed(status, status, cmd, out, err)
    return out

def pypy_exec_cmd(cmd):
    return popen3_exec_cmd(cmd)

class ExecutionFailed(py.error.Error):
    """raised when an executed command returned a non-zero exit status.

    Carries the decoded exit status, the raw system status, the command
    line, and the captured stdout/stderr ('out'/'err').
    """
    def __init__(self, status, systemstatus, cmd, out, err):
        Exception.__init__(self)
        self.status = status
        self.systemstatus = systemstatus
        self.cmd = cmd
        self.err = err
        self.out = out

    def __str__(self):
        return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)

#
# choose correct platform-version
#
if sys.platform == 'win32':
    cmdexec = win32_exec_cmd
elif hasattr(sys, 'pypy') or hasattr(sys, 'pypy_objspaceclass'):
    cmdexec = popen3_exec_cmd
else:
    cmdexec = posix_exec_cmd

# export the exception under the name 'py.process.cmdexec.Error'
cmdexec.Error = ExecutionFailed
try:
    ExecutionFailed.__module__ = 'py.process.cmdexec'
    ExecutionFailed.__name__ = 'Error'
except (AttributeError, TypeError):
    pass
Python
#!/usr/bin/python import py for x in py.path.local(): if x.ext == '.txt': cmd = ("python /home/hpk/projects/docutils/tools/rst2s5.py " "%s %s" %(x, x.new(ext='.html'))) print "execing", cmd py.std.os.system(cmd)
Python
import py py.magic.autopath() import py pydir = py.path.local(py.__file__).dirpath() distdir = pydir.dirpath() dist_url = 'http://codespeak.net/svn/py/dist/' #issue_url = 'http://codespeak.net/issue/py-dev/' docdir = pydir.join('documentation') reffile = docdir / 'talk' / '_ref.txt' linkrex = py.std.re.compile('`(\S+)`_') name2target = {} def addlink(linkname, linktarget): assert linkname and linkname != '/' if linktarget in name2target: if linkname in name2target[linktarget]: return name2target.setdefault(linktarget, []).append(linkname) for textfile in docdir.visit(lambda x: x.ext == '.txt', lambda x: x.check(dotfile=0)): for linkname in linkrex.findall(textfile.read()): if '/' in linkname: for startloc in ('', 'py'): cand = distdir.join(startloc, linkname) if cand.check(): rel = cand.relto(distdir) # we are in py/doc/x.txt count = rel.count("/") + 1 target = '../' * count + rel addlink(linkname, target) break else: print "WARNING %s: link %r may be bogus" %(textfile, linkname) elif linkname.startswith('issue'): addlink(linkname, issue_url+linkname) items = name2target.items() items.sort() lines = [] for linktarget, linknamelist in items: linknamelist.sort() for linkname in linknamelist[:-1]: lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) reffile.write("\n".join(lines)) print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x
Python
#!/usr/bin/python import py for x in py.path.local(): if x.ext == '.txt': cmd = ("python /home/hpk/projects/docutils/tools/rst2s5.py " "%s %s" %(x, x.new(ext='.html'))) print "execing", cmd py.std.os.system(cmd)
Python
""" confrest: turn the documentation's .txt (reST) files into styled
html pages with a common title/menubar layout. """
import py
from py.__.misc.rest import convert_rest_html, strip_html_header
from py.__.misc.difftime import worded_time
from py.__.doc.conftest import get_apigenpath, get_docpath
from py.__.apigen.linker import relpath

html = py.xml.html

class Page(object):
    """ an xhtml page with head, menubar and content area. """
    doctype = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
               ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')

    def __init__(self, project, title, targetpath, stylesheeturl=None,
                 type="text/html", encoding="ISO-8859-1"):
        self.project = project
        self.title = project.prefix_title + title
        self.targetpath = targetpath
        self.stylesheeturl = stylesheeturl
        self.type = type
        self.encoding = encoding

        self.body = html.body()
        self.head = html.head()
        self._root = html.html(self.head, self.body)
        # build the complete skeleton right away
        self.fill()

    def a_docref(self, name, relhtmlpath):
        # menu link to a documentation page, href relative to this page
        docpath = self.project.get_docpath()
        return html.a(name, class_="menu",
                      href=relpath(self.targetpath.strpath,
                                   docpath.join(relhtmlpath).strpath))

    def a_apigenref(self, name, relhtmlpath):
        # menu link into the generated apigen output
        apipath = get_apigenpath()
        return html.a(name, class_="menu",
                      href=relpath(self.targetpath.strpath,
                                   apipath.join(relhtmlpath).strpath))

    def fill_menubar(self):
        # assemble the menu links, separated by single spaces
        items = [
            self.a_docref("index", "index.html"),
            self.a_apigenref("api", "api/index.html"),
            self.a_apigenref("source", "source/index.html"),
            self.a_docref("contact", "contact.html"),
            self.a_docref("download", "download.html"),
        ]
        items2 = [items.pop(0)]
        sep = " "
        for item in items:
            items2.append(sep)
            items2.append(item)
        self.menubar = html.div(id="menubar", *items2)

    def fill(self):
        # populate head (title, content-type, optional stylesheet) and
        # body (logo, title/menubar block, empty content container)
        content_type = "%s;charset=%s" %(self.type, self.encoding)
        self.head.append(html.title(self.title))
        self.head.append(html.meta(name="Content-Type", content=content_type))
        if self.stylesheeturl:
            self.head.append(
                    html.link(href=self.stylesheeturl,
                              media="screen", rel="stylesheet",
                              type="text/css"))
        self.fill_menubar()

        self.metaspace = html.div(
                html.div(self.title, class_="project_title"),
                self.menubar,
                id='metaspace')

        self.body.append(self.project.logo)
        self.body.append(self.metaspace)
        self.contentspace = html.div(id="contentspace")
        self.body.append(self.contentspace)

    def unicode(self, doctype=True):
        """ serialize the page, optionally prefixed with the doctype. """
        page = self._root.unicode()
        if doctype:
            return self.doctype + page
        else:
            return page

class PyPage(Page):
    def get_menubar(self):
        # NOTE(review): the base Page defines fill_menubar(), not
        # get_menubar(), so this super() call would raise AttributeError
        # if invoked -- confirm whether this override is dead code.
        menubar = super(PyPage, self).get_menubar()
        # base layout
        menubar.append(
            html.a("issue", href="https://codespeak.net/issue/py-dev/",
                   class_="menu"),
        )
        return menubar


def getrealname(username):
    # best-effort translation of a (svn) user name to a real name via the
    # optional 'uconf' module; fall back to the plain user name throughout
    try:
        import uconf
    except ImportError:
        return username
    try:
        user = uconf.system.User(username)
    except KeyboardInterrupt:
        raise
    try:
        return user.realname or username
    except KeyError:
        return username


class Project:
    """ project configuration: paths, styling and the html rendering of
        a single documentation source file (see process()). """
    mydir = py.magic.autopath().dirpath()
    # string for url, path for local file
    stylesheet = mydir.join('style.css')
    title = "py lib"
    prefix_title = ""  # we have a logo already containing "py lib"
    encoding = 'latin1'
    logo = html.div(
        html.a(
            html.img(alt="py lib", id='pyimg', height=114, width=154,
                     src="http://codespeak.net/img/pylib.png"),
            href="http://codespeak.net"))
    Page = PyPage

    def get_content(self, txtpath, encoding):
        # read the reST source as a unicode string
        return unicode(txtpath.read(), encoding)

    def get_docpath(self):
        return get_docpath()

    def get_htmloutputpath(self, txtpath):
        # map a source .txt path to its .html output location under docpath
        docpath = self.get_docpath()
        reloutputpath = txtpath.new(ext='.html').relto(self.mydir)
        return docpath.join(reloutputpath)

    def process(self, txtpath):
        """ convert the given .txt file to a styled html page and write it
            to the output directory. """
        encoding = self.encoding
        content = self.get_content(txtpath, encoding)
        docpath = self.get_docpath()
        outputpath = self.get_htmloutputpath(txtpath)

        stylesheet = self.stylesheet
        if isinstance(self.stylesheet, py.path.local):
            # copy the local stylesheet next to the output once, then refer
            # to it via a relative url
            if not docpath.join(stylesheet.basename).check():
                docpath.ensure(dir=True)
                stylesheet.copy(docpath)
            stylesheet = relpath(outputpath.strpath,
                                 docpath.join(stylesheet.basename).strpath)

        content = convert_rest_html(content, txtpath,
                                    stylesheet=stylesheet, encoding=encoding)
        content = strip_html_header(content, encoding=encoding)

        page = self.Page(self, "[%s] " % txtpath.purebasename,
                         outputpath, stylesheeturl=stylesheet)

        try:
            # show last-modification info when svn metadata is available
            svninfo = txtpath.info()
            modified = " modified %s by %s" % (worded_time(svninfo.mtime),
                                               getrealname(svninfo.last_author))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # best-effort: no svn info, leave the line blank
            modified = " "

        page.contentspace.append(
            html.div(html.div(modified, style="float: right; font-style: italic;"),
                     id = 'docinfoline'))

        page.contentspace.append(py.xml.raw(content))
        outputpath.ensure().write(page.unicode().encode(encoding))
Python
""" py.test conftest for the documentation tree: collects .txt files and
checks their reST syntax, embedded doctests and link targets. """
from __future__ import generators
import py
from py.__.misc import rest
from py.__.apigen.linker import relpath
import os

pypkgdir = py.path.local(py.__file__).dirpath()

mypath = py.magic.autopath().dirpath()

Option = py.test.config.Option
option = py.test.config.addoptions("documentation check options",
        Option('-R', '--checkremote',
               action="store_true", dest="checkremote", default=False,
               help="perform tests involving remote accesses (links, svn)"),
        Option('', '--forcegen',
               action="store_true", dest="forcegen", default=False,
               help="force generation of html files even if they appear up-to-date"),
)

def get_apigenpath():
    """ return the absolute path where apigen output lives;
        $APIGENPATH overrides the command line option. """
    from py.__.conftest import option
    path = os.environ.get('APIGENPATH')
    if path is None:
        path = option.apigenpath
    return pypkgdir.join(path, abs=True)

def get_docpath():
    """ return the absolute path where generated docs live;
        $DOCPATH overrides the command line option. """
    from py.__.conftest import option
    path = os.environ.get('DOCPATH')
    if path is None:
        path = option.docpath
    return pypkgdir.join(path, abs=True)

def get_apigen_relpath():
    # relative url prefix leading from the doc output to the apigen output
    return relpath(get_docpath().strpath + '/',
                   get_apigenpath().strpath + '/')

def deindent(s, sep='\n'):
    """ remove the common leading whitespace from all non-blank lines of 's'. """
    leastspaces = -1
    lines = s.split(sep)
    for line in lines:
        if not line.strip():
            continue
        spaces = len(line) - len(line.lstrip())
        if leastspaces == -1 or spaces < leastspaces:
            leastspaces = spaces
    if leastspaces == -1:
        # only blank lines: nothing to deindent
        return s
    for i, line in py.builtin.enumerate(lines):
        if not line.strip():
            lines[i] = ''
        else:
            lines[i] = line[leastspaces:]
    return sep.join(lines)

_initialized = False
def checkdocutils():
    """ skip the current test if docutils is missing; register our
        :api:/:source: link roles exactly once. """
    global _initialized
    try:
        import docutils
    except ImportError:
        py.test.skip("docutils not importable")
    if not _initialized:
        from py.__.rest import directive
        directive.register_linkrole('api', resolve_linkrole)
        directive.register_linkrole('source', resolve_linkrole)
        _initialized = True

def restcheck(path):
    """ process one reST file, using the closest confrest.py Project if one
        exists in a parent directory, otherwise the default processor. """
    localpath = path
    if hasattr(path, 'localpath'):
        localpath = path.localpath
    checkdocutils()
    import docutils.utils
    try:
        cur = localpath
        # walk upwards looking for a confrest.py that defines the Project
        for x in cur.parts(reverse=True):
            confrest = x.dirpath('confrest.py')
            if confrest.check(file=1):
                confrest = confrest.pyimport()
                project = confrest.Project()
                _checkskip(path, project.get_htmloutputpath(path))
                project.process(path)
                break
        else:
            # defer to default processor
            _checkskip(path)
            rest.process(path)
    except KeyboardInterrupt:
        raise
    except docutils.utils.SystemMessage:
        # we assume docutils printed info on stdout
        # NOTE(review): the failure text says "stderr" while the comment
        # above says stdout -- confirm which stream docutils actually uses.
        py.test.fail("docutils processing failed, see captured stderr")

def _checkskip(lpath, htmlpath=None):
    # skip regeneration when the html output is newer than its source,
    # unless --forcegen was given
    if not option.forcegen:
        lpath = py.path.local(lpath)
        if htmlpath is not None:
            htmlpath = py.path.local(htmlpath)
        if lpath.ext == '.txt':
            htmlpath = htmlpath or lpath.new(ext='.html')
            if htmlpath.check(file=1) and htmlpath.mtime() >= lpath.mtime():
                py.test.skip("html file is up to date, use --forcegen to regenerate")
                #return [] # no need to rebuild

class ReSTSyntaxTest(py.test.collect.Item):
    # one collected test per .txt file: run it through the reST processor
    def run(self):
        mypath = self.fspath
        restcheck(py.path.svnwc(mypath))

class DoctestText(py.test.collect.Item):
    # runs '.. >>> ' prefixed setup lines and the remaining text as a doctest
    def run(self):
        # XXX quite nasty... but it works (fixes win32 issues)
        s = self._normalize_linesep()
        l = []
        prefix = '.. >>> '
        mod = py.std.types.ModuleType(self.fspath.purebasename)
        for line in deindent(s).split('\n'):
            stripped = line.strip()
            if stripped.startswith(prefix):
                # execute setup statements in the fresh module's namespace
                exec py.code.Source(stripped[len(prefix):]).compile() in \
                     mod.__dict__
                line = ""
            else:
                l.append(line)
        docstring = "\n".join(l)
        self.execute(mod, docstring)

    def execute(self, mod, docstring):
        # install the remaining text as the module docstring and doctest it
        mod.__doc__ = docstring
        failed, tot = py.compat.doctest.testmod(mod, verbose=1)
        if failed:
            py.test.fail("doctest %s: %s failed out of %s" %(
                         self.fspath, failed, tot))

    def _normalize_linesep(self):
        # translate \r or \r\n line endings to plain \n
        s = self.fspath.read()
        linesep = '\n'
        if '\r' in s:
            if '\n' not in s:
                linesep = '\r'
            else:
                linesep = '\r\n'
        s = s.replace(linesep, '\n')
        return s

class LinkCheckerMaker(py.test.collect.Collector):
    # turns each link found in the file into a named CheckLink test
    def run(self):
        l = []
        for call, tryfn, path, lineno in genlinkchecks(self.fspath):
            l.append(tryfn)
        return l

    def join(self, name):
        for call, tryfn, path, lineno in genlinkchecks(self.fspath):
            if tryfn == name:
                return CheckLink(name, parent=self,
                                 args=(tryfn, path, lineno), obj=call)

class CheckLink(py.test.collect.Function):
    # plain function test without setup/teardown
    def setup(self):
        pass
    def teardown(self):
        pass

class ReSTChecker(py.test.collect.Module):
    # collector for one .txt file: syntax check, link checks and doctest
    DoctestText = DoctestText
    ReSTSyntaxTest = ReSTSyntaxTest

    def __repr__(self):
        return py.test.collect.Collector.__repr__(self)

    def setup(self):
        pass
    def teardown(self):
        pass

    def run(self):
        return [self.fspath.basename, 'checklinks', 'doctest']

    def join(self, name):
        if name == self.fspath.basename:
            return self.ReSTSyntaxTest(name, parent=self)
        elif name == 'checklinks':
            return LinkCheckerMaker(name, self)
        elif name == 'doctest':
            return self.DoctestText(name, self)

# generating functions + args as single tests
def genlinkchecks(path):
    """ yield (checkfunction, linktarget, path, lineno) for each link
        target definition ('.. _name: target') found in 'path'. """
    for lineno, line in py.builtin.enumerate(path.readlines()):
        line = line.strip()
        if line.startswith('.. _'):
            # backquoted names are terminated by '`:', plain ones by ':'
            if line.startswith('.. _`'):
                delim = '`:'
            else:
                delim = ':'
            l = line.split(delim, 1)
            if len(l) != 2:
                continue
            tryfn = l[1].strip()
            # NOTE(review): startswith('https') lacks the colon, unlike
            # 'http:' -- it also matches e.g. 'httpsomething'; confirm intent.
            if tryfn.startswith('http:') or tryfn.startswith('https'):
                if option.checkremote:
                    yield urlcheck, tryfn, path, lineno
            elif tryfn.startswith('webcal:'):
                continue
            else:
                # strip a '#anchor' suffix before deciding to check locally
                i = tryfn.find('#')
                if i != -1:
                    checkfn = tryfn[:i]
                else:
                    checkfn = tryfn
                # NOTE(review): '(1 or ...)' makes this guard always true;
                # the .html restriction is effectively disabled.
                if checkfn.strip() and (1 or checkfn.endswith('.html')):
                    yield localrefcheck, tryfn, path, lineno

def urlcheck(tryfn, path, lineno):
    """ check that the remote url 'tryfn' is reachable. """
    try:
        print "trying remote", tryfn
        py.std.urllib2.urlopen(tryfn)
    except (py.std.urllib2.URLError, py.std.urllib2.HTTPError), e:
        # NOTE(review): URLError instances have no 'code' attribute
        # (only HTTPError does) -- this branch may raise AttributeError
        # for non-http failures; confirm.
        if e.code in (401, 403): # authorization required, forbidden
            py.test.skip("%s: %s" %(tryfn, str(e)))
        else:
            py.test.fail("remote reference error %r in %s:%d\n%s" %(
                         tryfn, path.basename, lineno+1, e))

def localrefcheck(tryfn, path, lineno):
    """ check that a local reference (optionally with '#anchor') resolves
        to an existing file and, if given, a matching anchor. """
    # assume it should be a file
    i = tryfn.find('#')
    if tryfn.startswith('javascript:'):
        return # don't check JS refs
    if i != -1:
        anchor = tryfn[i+1:]
        tryfn = tryfn[:i]
    else:
        anchor = ''
    fn = path.dirpath(tryfn)
    # html output is generated from .txt sources, so check the source
    ishtml = fn.ext == '.html'
    fn = ishtml and fn.new(ext='.txt') or fn
    print "filename is", fn
    if not fn.check(): # not ishtml or not fn.check():
        if not py.path.local(tryfn).check(): # the html could be there
            py.test.fail("reference error %r in %s:%d" %(
                         tryfn, path.basename, lineno+1))
    if anchor:
        # look for a line matching the anchor in any accepted spelling
        source = unicode(fn.read(), 'latin1')
        source = source.lower().replace('-', ' ') # aehem
        anchor = anchor.replace('-', ' ')
        match2 = ".. _`%s`:" % anchor
        match3 = ".. _%s:" % anchor
        candidates = (anchor, match2, match3)
        print "candidates", repr(candidates)
        for line in source.split('\n'):
            line = line.strip()
            if line in candidates:
                break
        else:
            py.test.fail("anchor reference error %s#%s in %s:%d" %(
                         tryfn, anchor, path.basename, lineno+1))

# ___________________________________________________________
#
# hooking into py.test Directory collector's chain ...
class DocDirectory(py.test.collect.Directory):
    """ directory collector that additionally collects *.txt files
        and hands them to ReSTChecker. """
    ReSTChecker = ReSTChecker

    def run(self):
        # regular collection plus all .txt basenames in this directory
        results = super(DocDirectory, self).run()
        for x in self.fspath.listdir('*.txt', sort=True):
            results.append(x.basename)
        return results

    def join(self, name):
        if not name.endswith('.txt'):
            return super(DocDirectory, self).join(name)
        p = self.fspath.join(name)
        if p.check(file=1):
            return self.ReSTChecker(p, parent=self)

Directory = DocDirectory

def resolve_linkrole(name, text, check=True):
    """ resolve an :api:`...` or :source:`...` link role to a
        (text, relative-url) pair pointing into the apigen output;
        with check=True also verify that the target exists. """
    apigen_relpath = get_apigen_relpath()
    if name == 'api':
        if text == 'py':
            return ('py', apigen_relpath + 'api/index.html')
        else:
            assert text.startswith('py.'), (
                'api link "%s" does not point to the py package') % (text,)
            dotted_name = text
            # drop any call signature like 'py.foo.bar(x, y)'
            if dotted_name.find('(') > -1:
                dotted_name = dotted_name[:text.find('(')]
            # remove pkg root
            path = dotted_name.split('.')[1:]
            dotted_name = '.'.join(path)
            obj = py
            if check:
                # walk the attribute chain to prove the name exists
                for chunk in path:
                    try:
                        obj = getattr(obj, chunk)
                    except AttributeError:
                        raise AssertionError(
                            'problem with linkrole :api:`%s`: can not resolve '
                            'dotted name %s' % (text, dotted_name,))
            return (text, apigen_relpath + 'api/%s.html' % (dotted_name,))
    elif name == 'source':
        assert text.startswith('py/'), ('source link "%s" does not point '
                                        'to the py package') % (text,)
        relpath = '/'.join(text.split('/')[1:])
        if check:
            pkgroot = py.__package__.getpath()
            abspath = pkgroot.join(relpath)
            assert pkgroot.join(relpath).check(), (
                'problem with linkrole :source:`%s`: '
                'path %s does not exist' % (text, relpath))
        # directories map to their index page, files to their html rendering
        if relpath.endswith('/') or not relpath:
            relpath += 'index.html'
        else:
            relpath += '.html'
        return (text, apigen_relpath + 'source/%s' % (relpath,))
Python
#
Python
# example: derive a custom html "style" namespace by subclassing py.xml.html
# tags and attaching inline css via html.Style, then render a small document.
import py
html = py.xml.html

class my(html):
    "a custom style"
    class body(html.body):
        style = html.Style(font_size = "120%")

    class h2(html.h2):
        style = html.Style(background = "grey")

    class p(html.p):
        style = html.Style(font_weight="bold")

doc = my.html(
    my.head(),
    my.body(
        my.h2("hello world"),
        my.p("bold as bold can")
    )
)

# pretty-print the resulting document
print doc.unicode(indent=2)
Python
# failure_demo: a collection of INTENTIONALLY failing tests used to
# demonstrate py.test's failure reporting.  Do not "fix" these tests --
# every assertion error, NameError and unpack error below is deliberate.
from py.test import raises
import py

def otherfunc(a,b):
    assert a==b

def somefunc(x,y):
    otherfunc(x,y)

def otherfunc_multi(a,b):
    assert (a == b)

class TestFailing(object):
    def test_simple(self):
        def f():
            return 42
        def g():
            return 43

        assert f() == g()

    def test_simple_multiline(self):
        otherfunc_multi(
                  42,
                  6*9)

    def test_not(self):
        def f():
            return 42
        assert not f()

    def test_complex_error(self):
        def f():
            return 44
        def g():
            return 43
        somefunc(f(), g())

    def test_z1_unpack_error(self):
        l = []
        a,b = l

    def test_z2_type_error(self):
        l = 3
        a,b = l

    def test_startswith(self):
        s = "123"
        g = "456"
        assert s.startswith(g)

    def test_startswith_nested(self):
        def f():
            return "123"
        def g():
            return "456"
        assert f().startswith(g())

    def test_global_func(self):
        assert isinstance(globf(42), float)

    def test_instance(self):
        self.x = 6*7
        assert self.x != 42

    def test_compare(self):
        assert globf(10) < 5

    def test_try_finally(self):
        x = 1
        try:
            assert x == 0
        finally:
            x = 0

    def test_raises(self):
        s = 'qwe'
        raises(TypeError, "int(s)")

    def test_raises_doesnt(self):
        raises(IOError, "int('3')")

    def test_raise(self):
        raise ValueError("demo error")

    def test_tupleerror(self):
        a,b = [1]

    def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
        l = [1,2,3]
        print "l is", l
        a,b = l.pop()

    def test_some_error(self):
        if namenotexi:
            pass

    def test_generator(self):
        yield None

    def func1(self):
        assert 41 == 42

    def test_generator2(self):
        yield self.func1

# thanks to Matthew Scott for this test
def test_dynamic_compile_shows_nicely():
    # compiles a failing function into a synthetic module so the report
    # has to show dynamically compiled source
    src = 'def foo():\n assert 1 == 0\n'
    name = 'abc-123'
    module = py.std.imp.new_module(name)
    code = py.code.compile(src, name, 'exec')
    exec code in module.__dict__
    py.std.sys.modules[name] = module
    module.foo()

def globf(x):
    return x+1
Python
# example: build a minimal html document with py.xml.html and print it
# encoded as latin1.
from py.xml import html

paras = "First Para", "Second para"

doc = html.html(
   html.head(
        html.meta(name="Content-Type", value="text/html; charset=latin1")),
   html.body(
       # one paragraph tag per entry in 'paras'
       [html.p(p) for p in paras]))

print unicode(doc).encode('latin1')
Python
# example: define an ad-hoc xml namespace via py.xml.Namespace; tag names
# are created on attribute access, keyword args become attributes.
import py

class ns(py.xml.Namespace):
    pass

doc = ns.books(
    ns.book(
        ns.author("May Day"),
        ns.title("python for java programmers"),),
    ns.book(
        ns.author("why", class_="somecssclass"),
        ns.title("Java for Python programmers"),),
    publisher="N.N",
)
print doc.unicode(indent=2).encode('utf8')
Python
"""Defines a safe repr function. This will always return a string of "reasonable" length no matter what the object does in it's own repr function. Let's examine what can go wrong in an arbitrary repr function. The default repr will return something like (on Win32 anyway): <foo.bar object at 0x008D5650>. Well behaved user-defined repr() methods will do similar. The usual expectation is that repr will return a single line string. 1. However, the repr method can raise an exception of an arbitrary type. Also, the return value may not be as expected: 2. The return value may not be a string! 3. The return value may not be a single line string, it may contain line breaks. 4. The method may enter a loop and never return. 5. The return value may be enormous, eg range(100000) The standard library has a nice implementation in the repr module that will do the job, but the exception handling is silent, so the the output contains no clue that repr() call raised an exception. I would like to be told if repr raises an exception, it's a serious error, so a sublass of repr overrides the method that does repr for class instances.""" import repr import __builtin__ class SafeRepr(repr.Repr): def __init__(self, *args, **kwargs): repr.Repr.__init__(self, *args, **kwargs) # Do we need a commandline switch for this? 
self.maxstring = 240 # 3 * 80 chars self.maxother = 160 # 2 * 80 chars def repr_instance(self, x, level): try: # Try the vanilla repr and make sure that the result is a string s = str(__builtin__.repr(x)) except (KeyboardInterrupt, MemoryError, SystemExit): raise except Exception ,e: try: exc_name = e.__class__.__name__ except: exc_name = 'unknown' try: exc_info = str(e) except: exc_info = 'unknown' return '<[%s("%s") raised in repr()] %s object at 0x%x>' % \ (exc_name, exc_info, x.__class__.__name__, id(x)) except: try: name = x.__class__.__name__ except: name = 'unknown' return '<[unknown exception raised in repr()] %s object at 0x%x>' % \ (name, id(x)) if len(s) > self.maxstring: i = max(0, (self.maxstring-3)//2) j = max(0, self.maxstring-3-i) s = s[:i] + '...' + s[len(s)-j:] return s _repr = SafeRepr().repr
Python
""" high-level access to traceback objects: TracebackEntry wraps a single
tb frame, Traceback is a list of entries with filtering/cutting helpers. """
from __future__ import generators
import py

class TracebackEntry(object):
    """ a single entry in a traceback """
    # cached result of reinterpret(); None until computed
    exprinfo = None

    def __init__(self, rawentry):
        self._rawentry = rawentry
        self.frame = py.code.Frame(rawentry.tb_frame)
        # note: lineno is kept 0-based throughout this class
        self.lineno = rawentry.tb_lineno - 1
        # line number relative to the start of the code object
        self.relline = self.lineno - self.frame.code.firstlineno

    def __repr__(self):
        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)

    def statement(self):
        """ return a py.code.Source object for the current statement """
        source = self.frame.code.fullsource
        return source.getstatement(self.lineno)
    statement = property(statement, None, None,
                         "statement of this traceback entry.")

    def path(self):
        return self.frame.code.path
    path = property(path, None, None, "path to the full source code")

    def getlocals(self):
        return self.frame.f_locals
    locals = property(getlocals, None, None, "locals of underlaying frame")

    def reinterpret(self):
        """Reinterpret the failing statement and returns a detailed information
           about what operations are performed."""
        if self.exprinfo is None:
            from py.__.magic import exprinfo
            source = str(self.statement).strip()
            x = exprinfo.interpret(source, self.frame, should_fail=True)
            if not isinstance(x, str):
                raise TypeError, "interpret returned non-string %r" % (x,)
            self.exprinfo = x
        return self.exprinfo

    def getfirstlinesource(self):
        return self.frame.code.firstlineno

    def getsource(self):
        """ return failing source code. """
        source = self.frame.code.fullsource
        start = self.getfirstlinesource()
        end = self.lineno
        try:
            # extend to the full statement containing the failing line
            _, end = source.getstatementrange(end)
        except IndexError:
            end = self.lineno + 1
        # heuristic to stop displaying source on e.g.
        #   if something:  # assume this causes a NameError
        #      # _this_ lines and the one
        #      #   below we don't want from entry.getsource()
        for i in range(self.lineno, end):
            if source[i].rstrip().endswith(':'):
                end = i + 1
                break
        return source[start:end]
    source = property(getsource)

    def ishidden(self):
        """ return True if the current frame has a var __tracebackhide__
            resolving to True
            mostly for internal use
        """
        try:
            return self.frame.eval("__tracebackhide__")
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            # any failure to evaluate (e.g. name not defined) means: not hidden
            return False

    def __str__(self):
        # best-effort rendering: path and source line lookups may both fail
        try:
            fn = str(self.path)
        except py.error.Error:
            fn = '???'
        name = self.frame.code.name
        try:
            line = str(self.statement).lstrip()
        except EnvironmentError, e:
            line = "<could not get sourceline>"
        return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)

    def name(self):
        return self.frame.code.raw.co_name
    name = property(name, None, None, "co_name of underlaying code")

class Traceback(list):
    """ Traceback objects encapsulate and offer higher level
        access to Traceback entries.
    """
    Entry = TracebackEntry

    def __init__(self, tb):
        """ initialize from given python traceback object. """
        # accepts either a raw traceback object (walk the tb_next chain)
        # or an iterable of already-built entries
        if hasattr(tb, 'tb_next'):
            def f(cur):
                while cur is not None:
                    yield self.Entry(cur)
                    cur = cur.tb_next
            list.__init__(self, f(tb))
        else:
            list.__init__(self, tb)

    def cut(self, path=None, lineno=None, firstlineno=None):
        """ return a Traceback instance wrapping part of this Traceback

            by provding any combination of path, lineno and firstlineno, the
            first frame to start the to-be-returned traceback is determined

            this allows cutting the first part of a Traceback instance e.g.
            for formatting reasons (removing some uninteresting bits that deal
            with handling of the exception/traceback)
        """
        for x in self:
            if ((path is None or x.frame.code.path == path) and
                (lineno is None or x.lineno == lineno) and
                (firstlineno is None or
                 x.frame.code.firstlineno == firstlineno)):
                # rebuild from the matching raw entry so the new Traceback
                # starts exactly there
                return Traceback(x._rawentry)
        # no matching frame: return self unchanged
        return self

    def __getitem__(self, key):
        val = super(Traceback, self).__getitem__(key)
        # keep slices as Traceback instances, not plain lists
        # (type(slice(0)) is the pre-'isinstance(key, slice)' spelling)
        if isinstance(key, type(slice(0))):
            val = self.__class__(val)
        return val

    def filter(self, fn=lambda x: not x.ishidden()):
        """ return a Traceback instance with certain items removed

            fn is a function that gets a single argument, a TracebackItem
            instance, and should return True when the item should be added
            to the Traceback, False when not

            by default this removes all the TracebackItems which are hidden
            (see ishidden() above)
        """
        return Traceback(filter(fn, self))

    def getcrashentry(self):
        """ return last non-hidden traceback entry that lead
            to the exception of a traceback.
        """
        tb = self.filter()
        if not tb:
            # everything was hidden: fall back to the unfiltered traceback
            tb = self
        return tb[-1]

    def recursionindex(self):
        """ return the index of the frame/TracebackItem where recursion
            originates if appropriate, None if no recursion occurred
        """
        # map (path, lineno) -> list of f_locals seen at that location;
        # recursion is detected when the same location recurs with equal locals
        cache = {}
        for i, entry in py.builtin.enumerate(self):
            key = entry.frame.code.path, entry.lineno
            #print "checking for recursion at", key
            l = cache.setdefault(key, [])
            if l:
                f = entry.frame
                loc = f.f_locals
                for otherloc in l:
                    # compare the two locals dicts inside the frame via
                    # the precompiled co_equal expression below
                    if f.is_true(f.eval(co_equal,
                        __recursioncache_locals_1=loc,
                        __recursioncache_locals_2=otherloc)):
                        return i
            l.append(entry.frame.f_locals)
        return None

#    def __str__(self):
#        for x in self
#            l = []
##        for func, entry in self._tblist:
#            l.append(entry.display())
#        return "".join(l)

co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
                   '?', 'eval')
Python
from __future__ import generators import sys import inspect, tokenize import py cpy_compile = compile # DON'T IMPORT PY HERE class Source(object): """ a mutable object holding a source code fragment, possibly deindenting it. """ def __init__(self, *parts, **kwargs): self.lines = lines = [] de = kwargs.get('deindent', True) rstrip = kwargs.get('rstrip', True) for part in parts: if not part: partlines = [] if isinstance(part, Source): partlines = part.lines elif isinstance(part, (unicode, str)): partlines = part.split('\n') if rstrip: while partlines: if partlines[-1].strip(): break partlines.pop() else: partlines = getsource(part, deindent=de).lines if de: partlines = deindent(partlines) lines.extend(partlines) def __eq__(self, other): try: return self.lines == other.lines except AttributeError: if isinstance(other, str): return str(self) == other return False def __getitem__(self, key): if isinstance(key, int): return self.lines[key] else: if key.step not in (None, 1): raise IndexError("cannot slice a Source with a step") return self.__getslice__(key.start, key.stop) def __len__(self): return len(self.lines) def __getslice__(self, start, end): newsource = Source() newsource.lines = self.lines[start:end] return newsource def strip(self): """ return new source object with trailing and leading blank lines removed. """ start, end = 0, len(self) while start < end and not self.lines[start].strip(): start += 1 while end > start and not self.lines[end-1].strip(): end -= 1 source = Source() source.lines[:] = self.lines[start:end] return source def putaround(self, before='', after='', indent=' ' * 4): """ return a copy of the source object with 'before' and 'after' wrapped around it. 
""" before = Source(before) after = Source(after) newsource = Source() lines = [ (indent + line) for line in self.lines] newsource.lines = before.lines + lines + after.lines return newsource def indent(self, indent=' ' * 4): """ return a copy of the source object with all lines indented by the given indent-string. """ newsource = Source() newsource.lines = [(indent+line) for line in self.lines] return newsource def getstatement(self, lineno): """ return Source statement which contains the given linenumber (counted from 0). """ start, end = self.getstatementrange(lineno) return self[start:end] def getstatementrange(self, lineno): """ return (start, end) tuple which spans the minimal statement region which containing the given lineno. """ # XXX there must be a better than these heuristic ways ... # XXX there may even be better heuristics :-) if not (0 <= lineno < len(self)): raise IndexError("lineno out of range") # 1. find the start of the statement from codeop import compile_command for start in range(lineno, -1, -1): trylines = self.lines[start:lineno+1] # quick hack to indent the source and get it as a string in one go trylines.insert(0, 'def xxx():') trysource = '\n '.join(trylines) # ^ space here try: compile_command(trysource) except (SyntaxError, OverflowError, ValueError): pass else: break # got a valid or incomplete statement # 2. find the end of the statement for end in range(lineno+1, len(self)+1): trysource = self[start:end] if trysource.isparseable(): break return start, end def getblockend(self, lineno): # XXX lines = [x + '\n' for x in self.lines[lineno:]] blocklines = inspect.getblock(lines) #print blocklines return lineno + len(blocklines) - 1 def deindent(self, offset=None): """ return a new source object deindented by offset. If offset is None then guess an indentation offset from the first non-blank line. Subsequent lines which have a lower indentation offset will be copied verbatim as they are assumed to be part of multilines. 
""" # XXX maybe use the tokenizer to properly handle multiline # strings etc.pp? newsource = Source() newsource.lines[:] = deindent(self.lines, offset) return newsource def isparseable(self, deindent=True): """ return True if source is parseable, heuristically deindenting it by default. """ import parser if deindent: source = str(self.deindent()) else: source = str(self) try: parser.suite(source+'\n') except (parser.ParserError, SyntaxError): return False else: return True def __str__(self): return "\n".join(self.lines) def compile(self, filename=None, mode='exec', flag=generators.compiler_flag, dont_inherit=0): """ return compiled code object. if filename is None invent an artificial filename which displays the source/line position of the caller frame. """ if not filename or py.path.local(filename).check(file=0): frame = sys._getframe(1) # the caller filename = '%s<%s:%d>' % (filename, frame.f_code.co_filename, frame.f_lineno) source = "\n".join(self.lines) + '\n' try: co = cpy_compile(source, filename, mode, flag) except SyntaxError, ex: # re-represent syntax errors from parsing python strings msglines = self.lines[:ex.lineno] if ex.offset: msglines.append(" "*ex.offset + '^') msglines.append("syntax error probably generated here: %s" % filename) newex = SyntaxError('\n'.join(msglines)) newex.offset = ex.offset newex.lineno = ex.lineno newex.text = ex.text raise newex else: co_filename = MyStr(filename) co_filename.__source__ = self return py.code.Code(co).new(rec=1, co_filename=co_filename) #return newcode_withfilename(co, co_filename) # # public API shortcut functions # def compile_(source, filename=None, mode='exec', flags= generators.compiler_flag, dont_inherit=0): """ compile the given source to a raw code object, which points back to the source code through "co_filename.__source__". All code objects contained in the code object will recursively also have this special subclass-of-string filename. 
""" s = Source(source) co = s.compile(filename, mode, flags) return co # # various helper functions # class MyStr(str): """ custom string which allows to add attributes. """ def getsource(obj, **kwargs): if hasattr(obj, 'func_code'): obj = obj.func_code elif hasattr(obj, 'f_code'): obj = obj.f_code try: fullsource = obj.co_filename.__source__ except AttributeError: try: strsrc = inspect.getsource(obj) except IndentationError: strsrc = "\"Buggy python version consider upgrading, cannot get source\"" assert isinstance(strsrc, str) return Source(strsrc, **kwargs) else: lineno = obj.co_firstlineno - 1 end = fullsource.getblockend(lineno) return fullsource[lineno:end+1] def deindent(lines, offset=None): if offset is None: for line in lines: line = line.expandtabs() s = line.lstrip() if s: offset = len(line)-len(s) break else: offset = 0 if offset == 0: return list(lines) newlines = [] def readline_generator(lines): for line in lines: yield line + '\n' while True: yield '' readline = readline_generator(lines).next try: for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(readline): if sline > len(lines): break # End of input reached if sline > len(newlines): line = lines[sline - 1].expandtabs() if line.lstrip() and line[:offset].isspace(): line = line[offset:] # Deindent newlines.append(line) for i in range(sline, eline): # Don't deindent continuing lines of # multiline tokens (i.e. multiline strings) newlines.append(lines[i]) except (IndentationError, tokenize.TokenError): pass # Add any lines we didn't see. E.g. if an exception was raised. newlines.extend(lines[len(newlines):]) return newlines
Python
#
Python
import py class Code(object): """ wrapper around Python code objects """ def __init__(self, rawcode): rawcode = getattr(rawcode, 'im_func', rawcode) rawcode = getattr(rawcode, 'func_code', rawcode) self.raw = rawcode self.filename = rawcode.co_filename try: self.firstlineno = rawcode.co_firstlineno - 1 except AttributeError: raise TypeError("not a code object: %r" %(rawcode,)) self.name = rawcode.co_name def __eq__(self, other): return self.raw == other.raw def __ne__(self, other): return not self == other def new(self, rec=False, **kwargs): """ return new code object with modified attributes. if rec-cursive is true then dive into code objects contained in co_consts. """ names = [x for x in dir(self.raw) if x[:3] == 'co_'] for name in kwargs: if name not in names: raise TypeError("unknown code attribute: %r" %(name, )) if rec: newconstlist = [] co = self.raw cotype = type(co) for c in co.co_consts: if isinstance(c, cotype): c = self.__class__(c).new(rec=True, **kwargs) newconstlist.append(c) return self.new(rec=False, co_consts=tuple(newconstlist), **kwargs) for name in names: if name not in kwargs: kwargs[name] = getattr(self.raw, name) return py.std.new.code( kwargs['co_argcount'], kwargs['co_nlocals'], kwargs['co_stacksize'], kwargs['co_flags'], kwargs['co_code'], kwargs['co_consts'], kwargs['co_names'], kwargs['co_varnames'], kwargs['co_filename'], kwargs['co_name'], kwargs['co_firstlineno'], kwargs['co_lnotab'], kwargs['co_freevars'], kwargs['co_cellvars'], ) def path(self): """ return a py.path.local object wrapping the source of the code """ try: return self.raw.co_filename.__path__ except AttributeError: return py.path.local(self.raw.co_filename) path = property(path, None, None, "path of this code object") def fullsource(self): """ return a py.code.Source object for the full source file of the code """ fn = self.raw.co_filename try: return fn.__source__ except AttributeError: return py.code.Source(self.path.read(mode="rU")) fullsource = 
property(fullsource, None, None, "full source containing this code object") def source(self): """ return a py.code.Source object for the code object's source only """ # return source only for that part of code import inspect return py.code.Source(inspect.getsource(self.raw)) def getargs(self): """ return a tuple with the argument names for the code object """ # handfull shortcut for getting args raw = self.raw return raw.co_varnames[:raw.co_argcount]
Python
from __future__ import generators import sys import py class ExceptionInfo(object): """ wraps sys.exc_info() objects and offers help for navigating the traceback. """ _striptext = '' def __init__(self, tup=None, exprinfo=None): # NB. all attributes are private! Subclasses or other # ExceptionInfo-like classes may have different attributes. if tup is None: tup = sys.exc_info() if exprinfo is None and isinstance(tup[1], py.magic.AssertionError): exprinfo = tup[1].msg if exprinfo and exprinfo.startswith('assert '): self._striptext = 'AssertionError: ' self._excinfo = tup self.type, self.value, tb = self._excinfo self.typename = str(self.type) self.traceback = py.code.Traceback(tb) def exconly(self, tryshort=False): """ return the exception as a string when 'tryshort' resolves to True, and the exception is a py.magic.AssertionError, only the actual exception part of the exception representation is returned (so 'AssertionError: ' is removed from the beginning) """ lines = py.std.traceback.format_exception_only(self.type, self.value) text = ''.join(lines) if text.endswith('\n'): text = text[:-1] if tryshort: if text.startswith(self._striptext): text = text[len(self._striptext):] return text def errisinstance(self, exc): """ return True if the exception is an instance of exc """ return isinstance(self.value, exc) def __str__(self): # XXX wrong str return self.exconly()
Python
""" python inspection/code generation API """
Python
import py import py.__.code.safe_repr class Frame(object): """Wrapper around a Python frame holding f_locals and f_globals in which expressions can be evaluated.""" def __init__(self, frame): self.code = py.code.Code(frame.f_code) self.lineno = frame.f_lineno - 1 self.f_globals = frame.f_globals self.f_locals = frame.f_locals self.raw = frame def statement(self): return self.code.fullsource.getstatement(self.lineno) statement = property(statement, None, None, "statement this frame is at") def eval(self, code, **vars): """ evaluate 'code' in the frame 'vars' are optional additional local variables returns the result of the evaluation """ f_locals = self.f_locals.copy() f_locals.update(vars) return eval(code, self.f_globals, f_locals) def exec_(self, code, **vars): """ exec 'code' in the frame 'vars' are optiona; additional local variables """ f_locals = self.f_locals.copy() f_locals.update(vars) exec code in self.f_globals, f_locals def repr(self, object): """ return a 'safe' (non-recursive, one-line) string repr for 'object' """ return py.__.code.safe_repr._repr(object) def is_true(self, object): return object def getargs(self): """ return a list of tuples (name, value) for all arguments """ retval = [] for arg in self.code.getargs(): retval.append((arg, self.f_locals[arg])) return retval
Python
#!/usr/bin/env python import sys, os, os.path progpath = sys.argv[0] packagedir = os.path.abspath(os.path.dirname(progpath)) packagename = os.path.basename(packagedir) bindir = os.path.join(packagedir, 'bin') if sys.platform == 'win32': bindir = os.path.join(bindir, 'win32') rootdir = os.path.dirname(packagedir) def prepend_path(name, value): sep = os.path.pathsep curpath = os.environ.get(name, '') newpath = [value] + [ x for x in curpath.split(sep) if x and x != value ] return setenv(name, sep.join(newpath)) def setenv(name, value): shell = os.environ.get('SHELL', '') comspec = os.environ.get('COMSPEC', '') if shell.endswith('csh'): cmd = 'setenv %s "%s"' % (name, value) elif shell.endswith('sh'): cmd = '%s="%s"; export %s' % (name, value, name) elif comspec.endswith('cmd.exe'): cmd = 'set %s=%s' % (name, value) else: assert False, 'Shell not supported.' return cmd print prepend_path('PATH', bindir) print prepend_path('PYTHONPATH', rootdir)
Python
import thread class ThreadOut(object): """ A file like object that diverts writing operations to per-thread writefuncs. This is a py lib internal class and not meant for outer use or modification. """ def __new__(cls, obj, attrname): """ Divert file output to per-thread writefuncs. the given obj and attrname describe the destination of the file. """ current = getattr(obj, attrname) if isinstance(current, cls): current._used += 1 return current self = object.__new__(cls) self._tid2out = {} self._used = 1 self._oldout = getattr(obj, attrname) self._defaultwriter = self._oldout.write self._address = (obj, attrname) setattr(obj, attrname, self) return self def isatty(self): # XXX return False def setdefaultwriter(self, writefunc): self._defaultwriter = writefunc def resetdefault(self): self._defaultwriter = self._oldout.write def softspace(): def fget(self): return self._get()[0] def fset(self, value): self._get()[0] = value return property(fget, fset, None, "software attribute") softspace = softspace() def deinstall(self): self._used -= 1 x = self._used if x <= 0: obj, attrname = self._address setattr(obj, attrname, self._oldout) def setwritefunc(self, writefunc, tid=None): assert callable(writefunc) if tid is None: tid = thread.get_ident() self._tid2out[tid] = [0, writefunc] def delwritefunc(self, tid=None, ignoremissing=True): if tid is None: tid = thread.get_ident() try: del self._tid2out[tid] except KeyError: if not ignoremissing: raise def _get(self): tid = thread.get_ident() try: return self._tid2out[tid] except KeyError: return getattr(self._defaultwriter, 'softspace', 0), self._defaultwriter def write(self, data): softspace, out = self._get() out(data) def flush(self): pass
Python
import Queue import threading import time import sys ERRORMARKER = object() class Reply(object): """ reply instances provide access to the result of a function execution that got dispatched through WorkerPool.dispatch() """ _excinfo = None def __init__(self, task): self.task = task self._queue = Queue.Queue() def _set(self, result): self._queue.put(result) def _setexcinfo(self, excinfo): self._excinfo = excinfo self._queue.put(ERRORMARKER) def _get_with_timeout(self, timeout): # taken from python2.3's Queue.get() # we want to run on python2.2 here delay = 0.0005 # 500 us -> initial delay of 1 ms endtime = time.time() + timeout while 1: try: return self._queue.get_nowait() except Queue.Empty: remaining = endtime - time.time() if remaining <= 0: #time is over and no element arrived raise IOError("timeout waiting for task %r" %(self.task,)) delay = min(delay * 2, remaining, .05) time.sleep(delay) #reduce CPU usage by using a sleep def get(self, timeout=None): """ get the result object from an asynchronous function execution. if the function execution raised an exception, then calling get() will reraise that exception including its traceback. 
""" if self._queue is None: raise EOFError("reply has already been delivered") if timeout is not None: result = self._get_with_timeout(timeout) else: result = self._queue.get() if result is ERRORMARKER: self._queue = None excinfo = self._excinfo raise excinfo[0], excinfo[1], excinfo[2] return result class WorkerThread(threading.Thread): def __init__(self, pool): threading.Thread.__init__(self) self._queue = Queue.Queue() self._pool = pool self.setDaemon(1) def _run_once(self): reply = self._queue.get() if reply is SystemExit: return False assert self not in self._pool._ready task = reply.task try: func, args, kwargs = task result = func(*args, **kwargs) except (SystemExit, KeyboardInterrupt): return False except: reply._setexcinfo(sys.exc_info()) else: reply._set(result) # at this point, reply, task and all other local variables go away return True def run(self): try: while self._run_once(): self._pool._ready[self] = True finally: del self._pool._alive[self] try: del self._pool._ready[self] except KeyError: pass def send(self, task): reply = Reply(task) self._queue.put(reply) return reply def stop(self): self._queue.put(SystemExit) class WorkerPool(object): """ A WorkerPool allows to dispatch function executions to threads. Each Worker Thread is reused for multiple function executions. The dispatching operation takes care to create and dispatch to existing threads. You need to call shutdown() to signal the WorkerThreads to terminate and join() in order to wait until all worker threads have terminated. """ _shuttingdown = False def __init__(self, maxthreads=None): """ init WorkerPool instance which may create up to `maxthreads` worker threads. """ self.maxthreads = maxthreads self._ready = {} self._alive = {} def dispatch(self, func, *args, **kwargs): """ return Reply object for the asynchronous dispatch of the given func(*args, **kwargs) in a separate worker thread. 
""" if self._shuttingdown: raise IOError("WorkerPool is already shutting down") try: thread, _ = self._ready.popitem() except KeyError: # pop from empty list if self.maxthreads and len(self._alive) >= self.maxthreads: raise IOError("can't create more than %d threads." % (self.maxthreads,)) thread = self._newthread() return thread.send((func, args, kwargs)) def _newthread(self): thread = WorkerThread(self) self._alive[thread] = True thread.start() return thread def shutdown(self): """ signal all worker threads to terminate. call join() to wait until all threads termination. """ if not self._shuttingdown: self._shuttingdown = True for t in self._alive.keys(): t.stop() def join(self, timeout=None): """ wait until all worker threads have terminated. """ current = threading.currentThread() deadline = delta = None if timeout is not None: deadline = time.time() + timeout for thread in self._alive.keys(): if deadline: delta = deadline - time.time() if delta <= 0: raise IOError("timeout while joining threads") thread.join(timeout=delta) if thread.isAlive(): raise IOError("timeout while joining threads") class NamedThreadPool: def __init__(self, **kw): self._namedthreads = {} for name, value in kw.items(): self.start(name, value) def __repr__(self): return "<NamedThreadPool %r>" %(self._namedthreads) def get(self, name=None): if name is None: l = [] for x in self._namedthreads.values(): l.extend(x) return l else: return self._namedthreads.get(name, []) def getstarted(self, name=None): return [t for t in self.get(name) if t.isAlive()] def prunestopped(self, name=None): if name is None: for name in self.names(): self.prunestopped(name) else: self._namedthreads[name] = self.getstarted(name) def names(self): return self._namedthreads.keys() def start(self, name, func): l = self._namedthreads.setdefault(name, []) thread = threading.Thread(name="%s%d" % (name, len(l)), target=func) thread.start() l.append(thread)
Python
#
Python
#
Python
"""Classes to represent arbitrary sets (including sets of sets). This module implements sets using dictionaries whose values are ignored. The usual operations (union, intersection, deletion, etc.) are provided as both methods and operators. Important: sets are not sequences! While they support 'x in s', 'len(s)', and 'for x in s', none of those operations are unique for sequences; for example, mappings support all three as well. The characteristic operation for sequences is subscripting with small integers: s[i], for i in range(len(s)). Sets don't support subscripting at all. Also, sequences allow multiple occurrences and their elements have a definite order; sets on the other hand don't record multiple occurrences and don't remember the order of element insertion (which is why they don't support s[i]). The following classes are provided: BaseSet -- All the operations common to both mutable and immutable sets. This is an abstract class, not meant to be directly instantiated. Set -- Mutable sets, subclass of BaseSet; not hashable. ImmutableSet -- Immutable sets, subclass of BaseSet; hashable. _TemporarilyImmutableSet -- A wrapper around a Set, hashable, giving the same hash value as the immutable set equivalent would have. Do not use this class directly. Only hashable objects can be added to a Set. In particular, you cannot really add a Set as an element to another Set; if you try, what is actually added is an ImmutableSet built from it (it compares equal to the one you tried adding). When you ask if `x in y' where x is a Set and y is a Set or ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and what's tested is actually `z in y'. """ # Code history: # # - Greg V. Wilson wrote the first version, using a different approach # to the mutable/immutable problem, and inheriting from dict. # # - Alex Martelli modified Greg's version to implement the current # Set/ImmutableSet approach, and make the data an attribute. 
# # - Guido van Rossum rewrote much of the code, made some API changes, # and cleaned up the docstrings. # # - Raymond Hettinger added a number of speedups and other # improvements. from __future__ import generators try: from itertools import ifilter, ifilterfalse except ImportError: # Code to make the module run under Py2.2 def ifilter(predicate, iterable): if predicate is None: def predicate(x): return x for x in iterable: if predicate(x): yield x def ifilterfalse(predicate, iterable): if predicate is None: def predicate(x): return x for x in iterable: if not predicate(x): yield x try: True, False except NameError: True, False = (0==0, 0!=0) __all__ = ['BaseSet', 'Set', 'ImmutableSet'] class BaseSet(object): """Common base class for mutable and immutable sets.""" __slots__ = ['_data'] # Constructor def __init__(self): """This is an abstract class.""" # Don't call this from a concrete subclass! if self.__class__ is BaseSet: raise TypeError, ("BaseSet is an abstract class. " "Use Set or ImmutableSet.") # Standard protocols: __len__, __repr__, __str__, __iter__ def __len__(self): """Return the number of elements of a set.""" return len(self._data) def __repr__(self): """Return string representation of a set. This looks like 'Set([<list of elements>])'. """ return self._repr() # __str__ is the same as __repr__ __str__ = __repr__ def _repr(self, sorted=False): elements = self._data.keys() if sorted: elements.sort() return '%s(%r)' % (self.__class__.__name__, elements) def __iter__(self): """Return an iterator over the elements or a set. This is the keys iterator for the underlying dict. """ return self._data.iterkeys() # Three-way comparison is not supported. However, because __eq__ is # tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and # then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this # case). def __cmp__(self, other): raise TypeError, "can't compare sets using cmp()" # Equality comparisons using the underlying dicts. 
Mixed-type comparisons # are allowed here, where Set == z for non-Set z always returns False, # and Set != z always True. This allows expressions like "x in y" to # give the expected result when y is a sequence of mixed types, not # raising a pointless TypeError just because y contains a Set, or x is # a Set and y contain's a non-set ("in" invokes only __eq__). # Subtle: it would be nicer if __eq__ and __ne__ could return # NotImplemented instead of True or False. Then the other comparand # would get a chance to determine the result, and if the other comparand # also returned NotImplemented then it would fall back to object address # comparison (which would always return False for __eq__ and always # True for __ne__). However, that doesn't work, because this type # *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented, # Python tries __cmp__ next, and the __cmp__ here then raises TypeError. def __eq__(self, other): if isinstance(other, BaseSet): return self._data == other._data else: return False def __ne__(self, other): if isinstance(other, BaseSet): return self._data != other._data else: return True # Copying operations def copy(self): """Return a shallow copy of a set.""" result = self.__class__() result._data.update(self._data) return result __copy__ = copy # For the copy module def __deepcopy__(self, memo): """Return a deep copy of a set; used by copy module.""" # This pre-creates the result and inserts it in the memo # early, in case the deep copy recurses into another reference # to this same set. A set can't be an element of itself, but # it can certainly contain an object that has a reference to # itself. from copy import deepcopy result = self.__class__() memo[id(self)] = result data = result._data value = True for elt in self: data[deepcopy(elt, memo)] = value return result # Standard set operations: union, intersection, both differences. # Each has an operator version (e.g. __or__, invoked with |) and a # method version (e.g. union). 
# Subtle: Each pair requires distinct code so that the outcome is # correct when the type of other isn't suitable. For example, if # we did "union = __or__" instead, then Set().union(3) would return # NotImplemented instead of raising TypeError (albeit that *why* it # raises TypeError as-is is also a bit subtle). def __or__(self, other): """Return the union of two sets as a new set. (I.e. all elements that are in either set.) """ if not isinstance(other, BaseSet): return NotImplemented return self.union(other) def union(self, other): """Return the union of two sets as a new set. (I.e. all elements that are in either set.) """ result = self.__class__(self) result._update(other) return result def __and__(self, other): """Return the intersection of two sets as a new set. (I.e. all elements that are in both sets.) """ if not isinstance(other, BaseSet): return NotImplemented return self.intersection(other) def intersection(self, other): """Return the intersection of two sets as a new set. (I.e. all elements that are in both sets.) """ if not isinstance(other, BaseSet): other = Set(other) if len(self) <= len(other): little, big = self, other else: little, big = other, self common = ifilter(big._data.has_key, little) return self.__class__(common) def __xor__(self, other): """Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.) """ if not isinstance(other, BaseSet): return NotImplemented return self.symmetric_difference(other) def symmetric_difference(self, other): """Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.) 
""" result = self.__class__() data = result._data value = True selfdata = self._data try: otherdata = other._data except AttributeError: otherdata = Set(other)._data for elt in ifilterfalse(otherdata.has_key, selfdata): data[elt] = value for elt in ifilterfalse(selfdata.has_key, otherdata): data[elt] = value return result def __sub__(self, other): """Return the difference of two sets as a new Set. (I.e. all elements that are in this set and not in the other.) """ if not isinstance(other, BaseSet): return NotImplemented return self.difference(other) def difference(self, other): """Return the difference of two sets as a new Set. (I.e. all elements that are in this set and not in the other.) """ result = self.__class__() data = result._data try: otherdata = other._data except AttributeError: otherdata = Set(other)._data value = True for elt in ifilterfalse(otherdata.has_key, self): data[elt] = value return result # Membership test def __contains__(self, element): """Report whether an element is a member of a set. (Called in response to the expression `element in self'.) """ try: return element in self._data except TypeError: transform = getattr(element, "__as_temporarily_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught return transform() in self._data # Subset and superset test def issubset(self, other): """Report whether another set contains this set.""" self._binary_sanity_check(other) if len(self) > len(other): # Fast check for obvious cases return False for elt in ifilterfalse(other._data.has_key, self): return False return True def issuperset(self, other): """Report whether this set contains another set.""" self._binary_sanity_check(other) if len(self) < len(other): # Fast check for obvious cases return False for elt in ifilterfalse(self._data.has_key, other): return False return True # Inequality comparisons using the is-subset relation. 
    # --- remainder of BaseSet (this class starts before this chunk) ---
    # presumably a vendored copy of the stdlib 'sets' module -- TODO confirm
    __le__ = issubset
    __ge__ = issuperset

    def __lt__(self, other):
        # proper subset: strictly smaller AND contained
        self._binary_sanity_check(other)
        return len(self) < len(other) and self.issubset(other)

    def __gt__(self, other):
        # proper superset: strictly larger AND containing
        self._binary_sanity_check(other)
        return len(self) > len(other) and self.issuperset(other)

    # Assorted helpers

    def _binary_sanity_check(self, other):
        # Check that the other argument to a binary operation is also
        # a set, raising a TypeError otherwise.
        if not isinstance(other, BaseSet):
            raise TypeError, "Binary operation only permitted between sets"

    def _compute_hash(self):
        # Calculate hash code for a set by xor'ing the hash codes of
        # the elements.  This ensures that the hash code does not depend
        # on the order in which elements are added to the set.  This is
        # not called __hash__ because a BaseSet should not be hashable;
        # only an ImmutableSet is hashable.
        result = 0
        for elt in self:
            result ^= hash(elt)
        return result

    def _update(self, iterable):
        # The main loop for update() and the subclass __init__() methods.
        data = self._data

        # Use the fast update() method when a dictionary is available.
        if isinstance(iterable, BaseSet):
            data.update(iterable._data)
            return

        value = True

        if type(iterable) in (list, tuple, xrange):
            # Optimized: we know that __iter__() and next() can't
            # raise TypeError, so we can move 'try:' out of the loop.
            it = iter(iterable)
            while True:
                try:
                    for element in it:
                        data[element] = value
                    return
                except TypeError:
                    # unhashable element: fall back to its immutable proxy
                    transform = getattr(element, "__as_immutable__", None)
                    if transform is None:
                        raise # re-raise the TypeError exception we caught
                    data[transform()] = value
        else:
            # Safe: only catch TypeError where intended
            for element in iterable:
                try:
                    data[element] = value
                except TypeError:
                    transform = getattr(element, "__as_immutable__", None)
                    if transform is None:
                        raise # re-raise the TypeError exception we caught
                    data[transform()] = value


class ImmutableSet(BaseSet):
    """Immutable set class."""

    __slots__ = ['_hashcode']

    # BaseSet + hashing

    def __init__(self, iterable=None):
        """Construct an immutable set from an optional iterable."""
        # hash is computed lazily on first __hash__() call
        self._hashcode = None
        self._data = {}
        if iterable is not None:
            self._update(iterable)

    def __hash__(self):
        if self._hashcode is None:
            self._hashcode = self._compute_hash()
        return self._hashcode

    def __getstate__(self):
        return self._data, self._hashcode

    def __setstate__(self, state):
        self._data, self._hashcode = state


class Set(BaseSet):
    """ Mutable set class."""

    __slots__ = []

    # BaseSet + operations requiring mutability; no hashing

    def __init__(self, iterable=None):
        """Construct a set from an optional iterable."""
        self._data = {}
        if iterable is not None:
            self._update(iterable)

    def __getstate__(self):
        # getstate's results are ignored if it is not
        # a tuple -- hence the trailing comma below
        return self._data,

    def __setstate__(self, data):
        self._data, = data

    def __hash__(self):
        """A Set cannot be hashed."""
        # We inherit object.__hash__, so we must deny this explicitly
        raise TypeError, "Can't hash a Set, only an ImmutableSet."

    # In-place union, intersection, differences.
    # Subtle:  The xyz_update() functions deliberately return None,
    # as do all mutating operations on built-in container types.
    # The __xyz__ spellings have to return self, though.

    def __ior__(self, other):
        """Update a set with the union of itself and another."""
        self._binary_sanity_check(other)
        self._data.update(other._data)
        return self

    def union_update(self, other):
        """Update a set with the union of itself and another."""
        self._update(other)

    def __iand__(self, other):
        """Update a set with the intersection of itself and another."""
        self._binary_sanity_check(other)
        self._data = (self & other)._data
        return self

    def intersection_update(self, other):
        """Update a set with the intersection of itself and another."""
        if isinstance(other, BaseSet):
            self &= other
        else:
            self._data = (self.intersection(other))._data

    def __ixor__(self, other):
        """Update a set with the symmetric difference of itself and another."""
        self._binary_sanity_check(other)
        self.symmetric_difference_update(other)
        return self

    def symmetric_difference_update(self, other):
        """Update a set with the symmetric difference of itself and another."""
        data = self._data
        value = True
        if not isinstance(other, BaseSet):
            other = Set(other)
        if self is other:
            # x ^ x is empty
            self.clear()
        for elt in other:
            if elt in data:
                del data[elt]
            else:
                data[elt] = value

    def __isub__(self, other):
        """Remove all elements of another set from this set."""
        self._binary_sanity_check(other)
        self.difference_update(other)
        return self

    def difference_update(self, other):
        """Remove all elements of another set from this set."""
        data = self._data
        if not isinstance(other, BaseSet):
            other = Set(other)
        if self is other:
            # x - x is empty
            self.clear()
        for elt in ifilter(data.has_key, other):
            del data[elt]

    # Python dict-like mass mutations: update, clear

    def update(self, iterable):
        """Add all values from an iterable (such as a list or file)."""
        self._update(iterable)

    def clear(self):
        """Remove all elements from this set."""
        self._data.clear()

    # Single-element mutations: add, remove, discard

    def add(self, element):
        """Add an element to a set.

        This has no effect if the element is already present.
        """
        try:
            self._data[element] = True
        except TypeError:
            transform = getattr(element, "__as_immutable__", None)
            if transform is None:
                raise # re-raise the TypeError exception we caught
            self._data[transform()] = True

    def remove(self, element):
        """Remove an element from a set; it must be a member.

        If the element is not a member, raise a KeyError.
        """
        try:
            del self._data[element]
        except TypeError:
            transform = getattr(element, "__as_temporarily_immutable__", None)
            if transform is None:
                raise # re-raise the TypeError exception we caught
            del self._data[transform()]

    def discard(self, element):
        """Remove an element from a set if it is a member.

        If the element is not a member, do nothing.
        """
        try:
            self.remove(element)
        except KeyError:
            pass

    def pop(self):
        """Remove and return an arbitrary set element."""
        return self._data.popitem()[0]

    def __as_immutable__(self):
        # Return a copy of self as an immutable set
        return ImmutableSet(self)

    def __as_temporarily_immutable__(self):
        # Return self wrapped in a temporarily immutable set
        return _TemporarilyImmutableSet(self)


class _TemporarilyImmutableSet(BaseSet):
    # Wrap a mutable set as if it was temporarily immutable.
    # This only supplies hashing and equality comparisons.

    def __init__(self, set):
        self._set = set
        self._data = set._data  # Needed by ImmutableSet.__eq__()

    def __hash__(self):
        return self._set._compute_hash()

# prefer the builtins (2.4+), then the stdlib 'sets' module, and only
# fall back to the local copies defined above
try:
    set, frozenset = set, frozenset
except NameError:
    try:
        from sets import Set as set, ImmutableSet as frozenset
    except ImportError:
        set = Set
        frozenset = ImmutableSet
Python
builtin_cmp = cmp # need to use cmp as keyword arg def _sorted(iterable, cmp=None, key=None, reverse=0): use_cmp = None if key is not None: if cmp is None: def use_cmp(x, y): return builtin_cmp(x[0], y[0]) else: def use_cmp(x, y): return cmp(x[0], y[0]) l = [(key(element), element) for element in iterable] else: if cmp is not None: use_cmp = cmp l = list(iterable) #print l if use_cmp is not None: l.sort(use_cmp) else: l.sort() if reverse: l.reverse() if key is not None: return [element for (_, element) in l] return l try: sorted = sorted except NameError: sorted = _sorted
Python
from __future__ import generators try: reversed = reversed except NameError: def reversed(sequence): """reversed(sequence) -> reverse iterator over values of the sequence Return a reverse iterator """ if hasattr(sequence, '__reversed__'): return sequence.__reversed__() if not hasattr(sequence, '__getitem__'): raise TypeError("argument to reversed() must be a sequence") return reversed_iterator(sequence) class reversed_iterator(object): def __init__(self, seq): self.seq = seq self.remaining = len(seq) def __iter__(self): return self def next(self): i = self.remaining if i > 0: i -= 1 item = self.seq[i] self.remaining = i return item raise StopIteration def __length_hint__(self): return self.remaining
Python
try: BaseException = BaseException except NameError: BaseException = Exception
Python
#
Python
""" backports and additions of builtins """
Python
from __future__ import generators try: enumerate = enumerate except NameError: def enumerate(iterable): i = 0 for x in iterable: yield i, x i += 1
Python
#!/usr/bin/python import cgitb;cgitb.enable() import path import py from py.__.apigen.source.browser import parse_path from py.__.apigen.source.html import create_html, create_dir_html, \ create_unknown_html BASE_URL='http://codespeak.net/svn/py/dist' def cgi_main(): import os reqpath = os.environ.get('PATH_INFO', '') path = py.path.svnurl('%s%s' % (BASE_URL, reqpath)) if not path.check(): return create_unknown_html(path) if path.check(file=True): return unicode(create_html(parse_path(path))) elif path.check(dir=True): prefix = '' if not reqpath: prefix = 'index.cgi/' return create_dir_html(path, href_prefix=prefix) else: return create_unknown_html(path) print 'Content-Type: text/html; charset=UTF-8' print print cgi_main()
Python
""" simple Python syntax coloring """ import re class PythonSchema(object): """ contains information for syntax coloring """ comment = [('#', '\n'), ('#', '$')] multiline_string = ['"""', "'''"] string = ['"""', "'''", '"', "'"] keyword = ['and', 'break', 'continue', 'elif', 'else', 'except', 'finally', 'for', 'if', 'in', 'is', 'not', 'or', 'raise', 'return', 'try', 'while', 'with', 'yield'] alt_keyword = ['as', 'assert', 'class', 'def', 'del', 'exec', 'from', 'global', 'import', 'lambda', 'pass', 'print'] linejoin = r'\\' def assert_keywords(): from keyword import kwlist all = PythonSchema.keyword + PythonSchema.alt_keyword for x in kwlist: assert x in all assert_keywords() class Token(object): data = None type = 'unknown' def __init__(self, data, type='unknown'): self.data = data self.type = type def __repr__(self): return '<Token type="%s" %r>' % (self.type, self.data) def __eq__(self, other): return self.data == other.data and self.type == other.type def __ne__(self, other): return not self.__eq__(other) class Tokenizer(object): """ when fed lists strings, it will return tokens with type info very naive tokenizer, state is recorded for multi-line strings, etc. 
""" _re_word = re.compile('[\w_]+', re.U) _re_space = re.compile('\s+', re.U) _re_number = re.compile('[\d\.]*\d[\d\.]*l?', re.I | re.U) # XXX cheating a bit with the quotes _re_rest = re.compile('[^\w\s\d\'"]+', re.U) # these will be filled using the schema _re_strings_full = None _re_strings_multiline = None _re_strings_comments = None def __init__(self, schema): self.schema = schema self._inside_multiline = False self._re_strings_full = [] self._re_strings_multiline = [] self._re_strings_empty = [] for d in schema.string + schema.multiline_string: self._re_strings_full.append( re.compile(r'%s[^\\%s]*(\\.[^\\%s]*)+%s' % (d, d, d, d))) self._re_strings_full.append( re.compile(r'%s[^\\%s]+(\\.[^\\%s]*)*%s' % (d, d, d, d))) self._re_strings_empty.append(re.compile('%s%s' % (d, d))) for d in schema.multiline_string: self._re_strings_multiline.append((re.compile('(%s).*' % (d,), re.S), re.compile('.*?%s' % (d,)))) if schema.linejoin: j = schema.linejoin for d in schema.string: self._re_strings_multiline.append( (re.compile('(%s).*%s$' % (d, j)), re.compile('.*?%s' % (d,)))) # no multi-line comments in Python... 
phew :) self._re_comments = [] for start, end in schema.comment: self._re_comments.append(re.compile('%s.*?%s' % (start, end))) def tokenize(self, data): if self._inside_multiline: m = self._inside_multiline.match(data) if not m: yield Token(data, 'string') data = '' else: s = m.group(0) data = data[len(s):] self._inside_multiline = False yield Token(s, 'string') while data: for f in [self._check_full_strings, self._check_multiline_strings, self._check_empty_strings, self._check_comments, self._check_number, self._check_space, self._check_word, self._check_rest]: data, t = f(data) if t: yield t break else: raise ValueError( 'no token found in %r (bug in tokenizer)' % (data,)) def _check_full_strings(self, data): token = None for r in self._re_strings_full: m = r.match(data) if m: s = m.group(0) data = data[len(s):] token = Token(s, type='string') break return data, token def _check_multiline_strings(self, data): token = None for start, end in self._re_strings_multiline: m = start.match(data) if m: s = m.group(0) data = '' # XXX take care of a problem which is hard to fix with regexps: # '''foo 'bar' baz''' will not match single-line strings # (because [^"""] matches just a single " already), so let's # try to catch it here... (quite Python specific issue!) 
endm = end.match(s[len(m.group(1)):]) if endm: # see if it ends here already s = m.group(1) + endm.group(0) else: self._inside_multiline = end token = Token(s, 'string') break return data, token def _check_empty_strings(self, data): token = None for r in self._re_strings_empty: m = r.match(data) if m: s = m.group(0) data = data[len(s):] token = Token(s, type='string') break return data, token def _check_comments(self, data): # fortunately we don't have to deal with multi-line comments token = None for r in self._re_comments: m = r.match(data) if m: s = m.group(0) data = data[len(s):] token = Token(s, 'comment') break return data, token def _check_word(self, data): m = self._re_word.match(data) if m: s = m.group(0) type = 'word' if s in self.schema.keyword: type = 'keyword' elif s in self.schema.alt_keyword: type = 'alt_keyword' return data[len(s):], Token(s, type) return data, None def _check_space(self, data): m = self._re_space.match(data) if m: s = m.group(0) return data[len(s):], Token(s, 'whitespace') return data, None def _check_number(self, data): m = self._re_number.match(data) if m: s = m.group(0) return data[len(s):], Token(s, 'number') return data, None def _check_rest(self, data): m = self._re_rest.match(data) if m: s = m.group(0) return data[len(s):], Token(s, 'unknown') return data, None if __name__ == '__main__': import py, sys if len(sys.argv) != 2: print 'usage: %s <filename>' print ' tokenizes the file and prints the tokens per line' sys.exit(1) t = Tokenizer(PythonSchema) p = py.path.local(sys.argv[1]) assert p.ext == '.py' for line in p.read().split('\n'): print repr(line) print 't in multiline mode:', not not t._inside_multiline tokens = t.tokenize(line) print list(tokens)
Python
#!/usr/bin/python import cgitb;cgitb.enable() import path import py from py.__.apigen.source.browser import parse_path from py.__.apigen.source.html import create_html, create_dir_html, \ create_unknown_html BASE_URL='http://codespeak.net/svn/py/dist' def cgi_main(): import os reqpath = os.environ.get('PATH_INFO', '') path = py.path.svnurl('%s%s' % (BASE_URL, reqpath)) if not path.check(): return create_unknown_html(path) if path.check(file=True): return unicode(create_html(parse_path(path))) elif path.check(dir=True): prefix = '' if not reqpath: prefix = 'index.cgi/' return create_dir_html(path, href_prefix=prefix) else: return create_unknown_html(path) print 'Content-Type: text/html; charset=UTF-8' print print cgi_main()
Python
""" web server for displaying source """ import py from pypy.translator.js.examples import server from py.__.apigen.source.browser import parse_path from py.__.apigen.source.html import create_html, create_dir_html, create_unknown_html from py.xml import html class Handler(server.TestHandler): BASE_URL='http://codespeak.net/svn/py/dist' def __getattr__(self, attr): if attr == 'index': attr = '' url = self.BASE_URL + "/" + attr if url.endswith('_py'): url = url[:-3] + '.py' path = py.path.svnurl(url) if not path.check(): def f(rev=None): return create_unknown_html(path) f.exposed = True f.func_name = attr return f def f(rev='HEAD'): path = py.path.svnurl(url, rev) # some try.. except.. here if path.check(file=True): return unicode(create_html(parse_path(path))) elif path.check(dir=True): return create_dir_html(path) else: return create_unknown_html(path) f.exposed = True f.func_name = attr return f def _main(): server.start_server(handler=Handler) if __name__ == '__main__': _main()
Python
""" source browser using compiler module WARNING!!! This is very simple and very silly attempt to make so. """ from compiler import parse, ast import py from py.__.path.common import PathBase blockers = [ast.Function, ast.Class] class BaseElem(object): def listnames(self): if getattr(self, 'parent', None): return self.parent.listnames() + '.' + self.name return self.name class Module(BaseElem): def __init__(self, path, _dict): self.path = path self.dict = _dict def __getattr__(self, attr): try: return self.dict[attr] except KeyError: raise AttributeError(attr) def get_children(self): values = self.dict.values() all = values[:] for v in values: all += v.get_children() return all def get_endline(start, lst): l = lst[::-1] for i in l: if i.lineno: return i.lineno end_ch = get_endline(None, i.getChildNodes()) if end_ch: return end_ch return start class Function(BaseElem): def __init__(self, name, parent, firstlineno, endlineno): self.firstlineno = firstlineno self.endlineno = endlineno self.name = name self.parent = parent def get_children(self): return [] class Method(BaseElem): def __init__(self, name, parent, firstlineno, endlineno): self.name = name self.firstlineno = firstlineno self.endlineno = endlineno self.parent = parent def function_from_ast(ast, cls_ast, cls=Function): startline = ast.lineno endline = get_endline(startline, ast.getChildNodes()) assert endline return cls(ast.name, cls_ast, startline, endline) def class_from_ast(cls_ast): bases = [i.name for i in cls_ast.bases if isinstance(i, ast.Name)] # XXX methods = {} startline = cls_ast.lineno name = cls_ast.name endline = get_endline(startline, cls_ast.getChildNodes()) cls = Class(name, startline, endline, bases, []) cls.methods = dict([(i.name, function_from_ast(i, cls, Method)) for i in \ cls_ast.code.nodes if isinstance(i, ast.Function)]) return cls class Class(BaseElem): def __init__(self, name, firstlineno, endlineno, bases, methods): self.bases = bases self.firstlineno = firstlineno 
self.endlineno = endlineno self.name = name self.methods = methods def __getattr__(self, attr): try: return self.methods[attr] except KeyError: raise AttributeError(attr) def get_children(self): return self.methods.values() def dir_nodes(st): """ List all the subnodes, which are not blockers """ res = [] for i in st.getChildNodes(): res.append(i) if not i.__class__ in blockers: res += dir_nodes(i) return res def update_mod_dict(imp_mod, mod_dict): # make sure that things that are in mod_dict, and not in imp_mod, # are not shown for key, value in mod_dict.items(): if not hasattr(imp_mod, key): del mod_dict[key] def parse_path(path): if not isinstance(path, PathBase): path = py.path.local(path) buf = path.open().read() st = parse(buf) # first go - we get all functions and classes defined on top-level nodes = dir_nodes(st) function_ast = [i for i in nodes if isinstance(i, ast.Function)] classes_ast = [i for i in nodes if isinstance(i, ast.Class)] mod_dict = dict([(i.name, function_from_ast(i, None)) for i in function_ast] + [(i.name, class_from_ast(i)) for i in classes_ast]) # we check all the elements, if they're really there try: mod = path.pyimport() except (KeyboardInterrupt, SystemExit): raise except: # catch all other import problems generically # XXX some import problem: we probably should not # pretend to have an empty module pass else: update_mod_dict(mod, mod_dict) return Module(path, mod_dict)
Python
import os, sys sys.path = ['/'.join(os.path.dirname(__file__).split(os.sep)[:-3])] + sys.path
Python
""" html - generating ad-hoc html out of source browser """ import py from py.xml import html, raw from compiler import ast import time from py.__.apigen.source.color import Tokenizer, PythonSchema class HtmlEnchanter(object): def __init__(self, mod): self.mod = mod self.create_caches() def create_caches(self): mod = self.mod linecache = {} for item in mod.get_children(): linecache[item.firstlineno] = item self.linecache = linecache def enchant_row(self, num, row): # add some informations to row, like functions defined in that # line, etc. try: item = self.linecache[num] # XXX: this should not be assertion, rather check, but we want to # know if stuff is working pos = row.find(item.name) assert pos != -1 end = len(item.name) + pos chunk = html.a(row[pos:end], href="#" + item.listnames(), name=item.listnames()) return [row[:pos], chunk, row[end:]] except KeyError: return [row] # no more info def prepare_line(text, tokenizer, encoding): """ adds html formatting to text items (list) only processes items if they're of a string type (or unicode) """ ret = [] for item in text: if type(item) in [str, unicode]: tokens = tokenizer.tokenize(item) for t in tokens: if not isinstance(t.data, unicode): data = unicode(t.data, encoding) else: data = t.data if t.type in ['keyword', 'alt_keyword', 'number', 'string', 'comment']: ret.append(html.span(data, class_=t.type)) else: ret.append(data) else: ret.append(item) return ret class HTMLDocument(object): def __init__(self, encoding, tokenizer=None): self.encoding = encoding self.html = root = html.html() self.head = head = self.create_head() root.append(head) self.body = body = self.create_body() root.append(body) self.table, self.tbody = table, tbody = self.create_table() body.append(table) if tokenizer is None: tokenizer = Tokenizer(PythonSchema) self.tokenizer = tokenizer def create_head(self): return html.head( html.title('source view'), html.style(""" body, td { background-color: #FFF; color: black; font-family: monospace, 
Monaco; } table, tr { margin: 0px; padding: 0px; border-width: 0px; } a { color: blue; font-weight: bold; text-decoration: none; } a:hover { color: #005; } .lineno { text-align: right; color: #555; width: 3em; padding-right: 1em; border: 0px solid black; border-right-width: 1px; } .code { padding-left: 1em; white-space: pre; } .comment { color: purple; } .string { color: #777; } .keyword { color: blue; } .alt_keyword { color: green; } """, type='text/css'), ) def create_body(self): return html.body() def create_table(self): table = html.table(cellpadding='0', cellspacing='0') tbody = html.tbody() table.append(tbody) return table, tbody def add_row(self, lineno, text): if text == ['']: text = [raw('&#xa0;')] else: text = prepare_line(text, self.tokenizer, self.encoding) self.tbody.append(html.tr(html.td(str(lineno), class_='lineno'), html.td(class_='code', *text))) def __unicode__(self): # XXX don't like to use indent=0 here, but else py.xml's indentation # messes up the html inside the table cells (which displays formatting) return self.html.unicode(indent=0) def create_html(mod): # out is some kind of stream #*[html.tr(html.td(i.name)) for i in mod.get_children()] lines = mod.path.open().readlines() enchanter = HtmlEnchanter(mod) enc = get_module_encoding(mod.path) doc = HTMLDocument(enc) for i, row in enumerate(lines): row = enchanter.enchant_row(i + 1, row) doc.add_row(i + 1, row) return unicode(doc) style = html.style(""" body, p, td { background-color: #FFF; color: black; font-family: monospace, Monaco; } td.type { width: 2em; } td.name { width: 30em; } td.mtime { width: 13em; } td.size { text-alignment: right; } """) def create_dir_html(path, href_prefix=''): h = html.html( html.head( html.title('directory listing of %s' % (path,)), style, ), ) body = html.body( html.h1('directory listing of %s' % (path,)), ) h.append(body) table = html.table() body.append(table) tbody = html.tbody() table.append(tbody) items = list(path.listdir()) items.sort(key=lambda p: 
p.basename) items.sort(key=lambda p: not p.check(dir=True)) for fpath in items: tr = html.tr() tbody.append(tr) td1 = html.td(fpath.check(dir=True) and 'D' or 'F', class_='type') tr.append(td1) href = fpath.basename if href_prefix: href = '%s%s' % (href_prefix, href) if fpath.check(dir=True): href += '/' td2 = html.td(html.a(fpath.basename, href=href), class_='name') tr.append(td2) td3 = html.td(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(fpath.mtime())), class_='mtime') tr.append(td3) if fpath.check(dir=True): size = '' unit = '' else: size = fpath.size() unit = 'B' for u in ['kB', 'MB', 'GB', 'TB']: if size > 1024: size = round(size / 1024.0, 2) unit = u td4 = html.td('%s %s' % (size, unit), class_='size') tr.append(td4) return unicode(h) def create_unknown_html(path): h = html.html( html.head( html.title('Can not display page'), style, ), html.body( html.p('The data URL (%s) does not contain Python code.' % (path,)) ), ) return h.unicode() _reg_enc = py.std.re.compile(r'coding[:=]\s*([-\w.]+)') def get_module_encoding(path): if hasattr(path, 'strpath'): path = path.strpath if path[-1] in ['c', 'o']: path = path[:-1] fpath = py.path.local(path) fp = fpath.open() lines = [] try: # encoding is only allowed in the first two lines for i in range(2): lines.append(fp.readline()) finally: fp.close() match = _reg_enc.search('\n'.join(lines)) if match: return match.group(1) return 'ISO-8859-1'
Python
import py
import os

html = py.xml.html

# this here to serve two functions: first it makes the proto part of the temp
# urls (see TempLinker) customizable easily (for tests and such) and second
# it makes sure the temp links aren't replaced in generated source code etc.
# for this file (and its tests) itself.
TEMPLINK_PROTO = 'apigen.temp'

def getrelfspath(dotted_name):
    # XXX need to make sure its imported on non-py lib
    return eval(dotted_name, {"py": py})

class LazyHref(object):
    """ placeholder href that resolves through its linker on demand """
    def __init__(self, linker, linkid):
        self._linker = linker
        self._linkid = linkid

    def __unicode__(self):
        return unicode(self._linker.get_target(self._linkid))

class Linker(object):
    """ in-memory link database mapping link ids to targets """
    fromlocation = None

    def __init__(self):
        self._linkid2target = {}

    def get_lazyhref(self, linkid):
        return LazyHref(self, linkid)

    def set_link(self, linkid, target):
        """ register 'target' for 'linkid'; each id may be set only once

            raises AssertionError on a duplicate linkid.
        """
        # BUG FIX: this used to read
        #     assert (linkid not in ..., 'linkid %r already used' % ...)
        # i.e. it asserted a two-element tuple, which is always truthy,
        # so the duplicate-id guard could never fire.
        assert linkid not in self._linkid2target, \
               'linkid %r already used' % (linkid,)
        self._linkid2target[linkid] = target

    def get_target(self, linkid):
        """ resolve 'linkid', relative to fromlocation when one is set """
        linktarget = self._linkid2target[linkid]
        if self.fromlocation is not None:
            linktarget = relpath(self.fromlocation, linktarget)
        return linktarget

    def call_withbase(self, base, func, *args, **kwargs):
        """ run func with fromlocation temporarily set to 'base' """
        assert self.fromlocation is None
        self.fromlocation = base
        try:
            return func(*args, **kwargs)
        finally:
            # deleting the instance attribute re-exposes the class-level
            # default (None)
            del self.fromlocation

class TempLinker(object):
    """ performs a similar role to the Linker, but with a different approach

        instead of returning 'lazy' hrefs, this returns a simple URL-style
        string

        the 'temporary urls' are replaced on the filesystem after building
        the files, so that means even though a second pass is still
        required, things don't have to be built in-memory (as with the
        Linker)
    """
    fromlocation = None

    def __init__(self):
        self._linkid2target = {}

    def get_lazyhref(self, linkid):
        return '%s://%s' % (TEMPLINK_PROTO, linkid)

    def set_link(self, linkid, target):
        assert linkid not in self._linkid2target
        self._linkid2target[linkid] = target

    def get_target(self, tempurl, fromlocation=None):
        """ resolve a temporary url, optionally relative to fromlocation """
        assert tempurl.startswith('%s://' % (TEMPLINK_PROTO,))
        # keep any '://' occurring inside the link id itself
        linkid = '://'.join(tempurl.split('://')[1:])
        linktarget = self._linkid2target[linkid]
        if fromlocation is not None:
            linktarget = relpath(fromlocation, linktarget)
        return linktarget

    _reg_tempurl = py.std.re.compile('["\'](%s:\/\/[^"\s]*)["\']' % (
                                     TEMPLINK_PROTO,))
    def replace_dirpath(self, dirpath, stoponerrors=True):
        """ replace temporary links in all html files in dirpath and below """
        for fpath in dirpath.visit('*.html'):
            html = fpath.read()
            while 1:
                match = self._reg_tempurl.search(html)
                if not match:
                    break
                tempurl = match.group(1)
                try:
                    html = html.replace(
                        '"' + tempurl + '"',
                        '"' + self.get_target(tempurl,
                                              fpath.relto(dirpath)) + '"')
                except KeyError:
                    # unknown link id: either abort or mark it as not found
                    if stoponerrors:
                        raise
                    html = html.replace(
                        '"' + tempurl + '"',
                        '"apigen.notfound://%s"' % (tempurl,))
            fpath.write(html)

def relpath(p1, p2, sep=os.path.sep, back='..', normalize=True):
    """ create a relative path from p1 to p2

        sep is the seperator used for input and (depending on the setting
        of 'normalize', see below) output

        back is the string used to indicate the parent directory

        when 'normalize' is True, any backslashes (\) in the path
        will be replaced with forward slashes, resulting in a consistent
        output on Windows and the rest of the world

        paths to directories must end on a / (URL style)
    """
    if normalize:
        p1 = p1.replace(sep, '/')
        p2 = p2.replace(sep, '/')
        sep = '/'
        # XXX would be cool to be able to do long filename expansion and
        # drive letter fixes here, and such... iow: windows sucks :(
    if (p1.startswith(sep) ^ p2.startswith(sep)):
        raise ValueError("mixed absolute relative path: %r -> %r" % (p1, p2))
    fromlist = p1.split(sep)
    tolist = p2.split(sep)

    # AA
    # AA BB     -> AA/BB
    #
    # AA BB
    # AA CC     -> CC
    #
    # AA BB
    # AA        -> ../AA

    diffindex = 0
    for x1, x2 in zip(fromlist, tolist):
        if x1 != x2:
            break
        diffindex += 1
    commonindex = diffindex - 1

    fromlist_diff = fromlist[diffindex:]
    tolist_diff = tolist[diffindex:]

    if not fromlist_diff:
        return sep.join(tolist[commonindex:])
    backcount = len(fromlist_diff)
    if tolist_diff:
        return sep.join([back,]*(backcount-1) + tolist_diff)
    return sep.join([back,]*(backcount) + tolist[commonindex:])
Python
import py Option = py.test.config.Option option = py.test.config.addoptions("apigen test options", Option('', '--webcheck', action="store_true", dest="webcheck", default=False, help="run XHTML validation tests" ), )
Python
""" layout definition for generating api/source documents this is the place where customization can be done """ import py from py.__.doc import confrest from py.__.apigen import linker from py.__.doc.conftest import get_apigenpath, get_docpath here = py.magic.autopath().dirpath() class LayoutPage(confrest.PyPage): """ this provides the layout and style information """ stylesheets = [(here.join('../doc/style.css'), 'style.css'), (here.join('style.css'), 'apigen_style.css')] scripts = [(here.join('api.js'), 'api.js')] def __init__(self, *args, **kwargs): self.nav = kwargs.pop('nav') super(LayoutPage, self).__init__(*args, **kwargs) self.relpath = self.get_relpath() self.project.logo.attr.id = 'logo' def get_relpath(self): return linker.relpath(self.targetpath.strpath, get_apigenpath().strpath) + '/' def set_content(self, contentel): self.contentspace.append(contentel) def fill(self): super(LayoutPage, self).fill() self.body.insert(0, self.nav) def setup_scripts_styles(self, copyto=None): for path, name in self.stylesheets: if copyto: copyto.join(name).write(path.read()) self.head.append(py.xml.html.link(type='text/css', rel='stylesheet', href=self.relpath + name)) for path, name in self.scripts: if copyto: copyto.join(name).write(path.read()) self.head.append(py.xml.html.script(type="text/javascript", src=self.relpath + name))
Python
""" run 'py.test --apigen=<this script>' to get documentation exported """ import os import py import sys from py.__.apigen import htmlgen from py.__.apigen import linker from py.__.apigen import project from py.__.apigen.tracer.docstorage import pkg_to_dict from py.__.doc.conftest import get_apigenpath from layout import LayoutPage def get_documentable_items_pkgdir(pkgdir): """ get all documentable items from an initpkg pkgdir this is a generic implementation, import as 'get_documentable_items' from your module when using initpkg to get all public stuff in the package documented """ sys.path.insert(0, str(pkgdir.dirpath())) rootmod = __import__(pkgdir.basename) d = pkg_to_dict(rootmod) return pkgdir.basename, d def get_documentable_items(pkgdir): pkgname, pkgdict = get_documentable_items_pkgdir(pkgdir) from py.__.execnet.channel import Channel pkgdict['execnet.Channel'] = Channel Channel.__apigen_hide_from_nav__ = True return pkgname, pkgdict def build(pkgdir, dsa, capture): # create a linker (link database) for cross-linking l = linker.TempLinker() # create a project.Project instance to contain the LayoutPage instances proj = project.Project() # output dir from py.__.conftest import option targetdir = get_apigenpath() targetdir.ensure(dir=True) # find out what to build all_names = dsa._get_names(filter=lambda x, y: True) namespace_tree = htmlgen.create_namespace_tree(all_names) # and build it apb = htmlgen.ApiPageBuilder(targetdir, l, dsa, pkgdir, namespace_tree, proj, capture, LayoutPage) spb = htmlgen.SourcePageBuilder(targetdir, l, pkgdir, proj, capture, LayoutPage) apb.build_namespace_pages() class_names = dsa.get_class_names() apb.build_class_pages(class_names) function_names = dsa.get_function_names() apb.build_function_pages(function_names) spb.build_pages(pkgdir) l.replace_dirpath(targetdir)
Python
import py import os import inspect from py.__.apigen.layout import LayoutPage from py.__.apigen.source import browser as source_browser from py.__.apigen.source import html as source_html from py.__.apigen.source import color as source_color from py.__.apigen.tracer.description import is_private from py.__.apigen.rest.genrest import split_of_last_part from py.__.apigen.linker import relpath from py.__.apigen.html import H reversed = py.builtin.reversed sorted = py.builtin.sorted html = py.xml.html raw = py.xml.raw REDUCE_CALLSITES = True def is_navigateable(name): return (not is_private(name) and name != '__doc__') def show_property(name): if not name.startswith('_'): return True if name.startswith('__') and name.endswith('__'): # XXX do we need to skip more manually here? if (name not in dir(object) and name not in ['__doc__', '__dict__', '__name__', '__module__', '__weakref__', '__apigen_hide_from_nav__']): return True return False def deindent(str, linesep='\n'): """ de-indent string can be used to de-indent Python docstrings, it de-indents the first line to the side always, and determines the indentation of the rest of the text by taking that of the least indented (filled) line """ lines = str.strip().split(linesep) normalized = [] deindent = None normalized.append(lines[0].strip()) # replace tabs with spaces, empty lines that contain spaces only, and # find out what the smallest indentation is for line in lines[1:]: line = line.replace('\t', ' ' * 4) stripped = line.strip() if not stripped: normalized.append('') else: rstripped = line.rstrip() indent = len(rstripped) - len(stripped) if deindent is None or indent < deindent: deindent = indent normalized.append(line) ret = [normalized[0]] for line in normalized[1:]: if not line: ret.append(line) else: ret.append(line[deindent:]) return '%s\n' % (linesep.join(ret),) def get_linesep(s, default='\n'): """ return the line seperator of a string returns 'default' if no seperator can be found """ for sep in ('\r\n', 
'\r', '\n'): if sep in s: return sep return default def get_param_htmldesc(linker, func): """ get the html for the parameters of a function """ import inspect # XXX copy and modify formatargspec to produce html return inspect.formatargspec(*inspect.getargspec(func)) # some helper functionality def source_dirs_files(fspath): """ returns a tuple (dirs, files) for fspath dirs are all the subdirs, files are the files which are interesting in building source documentation for a Python code tree (basically all normal files excluding .pyc and .pyo ones) all files and dirs that have a name starting with . are considered hidden """ dirs = [] files = [] for child in fspath.listdir(): if child.basename.startswith('.'): continue if child.check(dir=True): dirs.append(child) elif child.check(file=True): if child.ext in ['.pyc', '.pyo']: continue files.append(child) return sorted(dirs), sorted(files) def create_namespace_tree(dotted_names): """ creates a tree (in dict form) from a set of dotted names """ ret = {} for dn in dotted_names: path = dn.split('.') for i in xrange(len(path)): ns = '.'.join(path[:i]) itempath = '.'.join(path[:i + 1]) if ns not in ret: ret[ns] = [] if itempath not in ret[ns]: ret[ns].append(itempath) return ret def wrap_page(project, title, targetpath, contentel, navel, basepath, pageclass): page = pageclass(project, title, targetpath, nav=navel, encoding='UTF-8') page.set_content(contentel) page.setup_scripts_styles(basepath) return page def enumerate_and_color(codelines, firstlineno, enc): snippet = H.SourceBlock() tokenizer = source_color.Tokenizer(source_color.PythonSchema) for i, line in enumerate(codelines): try: snippet.add_line(i + firstlineno + 1, source_html.prepare_line([line], tokenizer, enc)) except py.error.ENOENT: # error reading source code, giving up snippet = org break return snippet _get_obj_cache = {} def get_obj(dsa, pkg, dotted_name): full_dotted_name = '%s.%s' % (pkg.__name__, dotted_name) if dotted_name == '': return pkg try: return 
_get_obj_cache[dotted_name] except KeyError: pass path = dotted_name.split('.') ret = pkg for item in path: marker = [] ret = getattr(ret, item, marker) if ret is marker: try: ret = dsa.get_obj(dotted_name) except KeyError: raise NameError('can not access %s in %s' % (item, full_dotted_name)) else: break _get_obj_cache[dotted_name] = ret return ret def get_rel_sourcepath(projpath, filename, default=None): relpath = py.path.local(filename).relto(projpath) if not relpath: return default return relpath def get_package_revision(packageroot, _revcache={}): try: rev = _revcache[packageroot] except KeyError: wc = py.path.svnwc(packageroot) rev = None if wc.check(versioned=True): rev = py.path.svnwc(packageroot).info().rev _revcache[packageroot] = rev if packageroot.basename == "py": assert rev is not None return rev # the PageBuilder classes take care of producing the docs (using the stuff # above) class AbstractPageBuilder(object): pageclass = LayoutPage def write_page(self, title, reltargetpath, tag, nav): targetpath = self.base.join(reltargetpath) relbase= relpath('%s%s' % (targetpath.dirpath(), targetpath.sep), self.base.strpath + '/') page = wrap_page(self.project, title, targetpath, tag, nav, self.base, self.pageclass) # we write the page with _temporary_ hrefs here, need to be replaced # from the TempLinker later content = page.unicode() targetpath.ensure() targetpath.write(content.encode("utf8")) class SourcePageBuilder(AbstractPageBuilder): """ builds the html for a source docs page """ def __init__(self, base, linker, projroot, project, capture=None, pageclass=LayoutPage): self.base = base self.linker = linker self.projroot = projroot self.project = project self.capture = capture self.pageclass = pageclass def build_navigation(self, fspath): nav = H.Navigation(class_='sidebar') relpath = fspath.relto(self.projroot) path = relpath.split(os.path.sep) indent = 0 # build links to parents if relpath != '': for i in xrange(len(path)): dirpath = 
os.path.sep.join(path[:i]) abspath = self.projroot.join(dirpath).strpath if i == 0: text = self.projroot.basename else: text = path[i-1] nav.append(H.NavigationItem(self.linker, abspath, text, indent, False)) indent += 1 # build siblings or children and self if fspath.check(dir=True): # we're a dir, build ourselves and our children dirpath = fspath nav.append(H.NavigationItem(self.linker, dirpath.strpath, dirpath.basename, indent, True)) indent += 1 elif fspath.strpath == self.projroot.strpath: dirpath = fspath else: # we're a file, build our parent's children only dirpath = fspath.dirpath() diritems, fileitems = source_dirs_files(dirpath) for dir in diritems: nav.append(H.NavigationItem(self.linker, dir.strpath, dir.basename, indent, False)) for file in fileitems: selected = (fspath.check(file=True) and file.basename == fspath.basename) nav.append(H.NavigationItem(self.linker, file.strpath, file.basename, indent, selected)) return nav re = py.std.re _reg_body = re.compile(r'<body[^>]*>(.*)</body>', re.S) def build_python_page(self, fspath): # XXX two reads of the same file here... not very bad (disk caches # and such) but also not very nice... 
enc = source_html.get_module_encoding(fspath.strpath) source = fspath.read() sep = get_linesep(source) colored = [enumerate_and_color(source.split(sep), 0, enc)] tag = H.PythonSource(colored) nav = self.build_navigation(fspath) return tag, nav def build_dir_page(self, fspath): dirs, files = source_dirs_files(fspath) dirs = [(p.basename, self.linker.get_lazyhref(str(p))) for p in dirs] files = [(p.basename, self.linker.get_lazyhref(str(p))) for p in files] tag = H.DirList(dirs, files) nav = self.build_navigation(fspath) return tag, nav def build_nonpython_page(self, fspath): try: tag = H.NonPythonSource(unicode(fspath.read(), 'utf-8')) except UnicodeError: tag = H.NonPythonSource('no source available (binary file?)') nav = self.build_navigation(fspath) return tag, nav def build_pages(self, base): for fspath in [base] + list(base.visit()): if fspath.ext in ['.pyc', '.pyo']: continue if self.capture: self.capture.err.writeorg('.') relfspath = fspath.relto(base) if relfspath.find('%s.' % (os.path.sep,)) > -1: # skip hidden dirs and files continue elif fspath.check(dir=True): if relfspath != '': relfspath += os.path.sep reloutputpath = 'source%s%sindex.html' % (os.path.sep, relfspath) else: reloutputpath = "source%s%s.html" % (os.path.sep, relfspath) reloutputpath = reloutputpath.replace(os.path.sep, '/') outputpath = self.base.join(reloutputpath) self.linker.set_link(str(fspath), reloutputpath) self.build_page(fspath, outputpath, base) def build_page(self, fspath, outputpath, base): """ build syntax-colored source views """ if fspath.check(ext='.py'): try: tag, nav = self.build_python_page(fspath) except (KeyboardInterrupt, SystemError): raise except: # XXX strange stuff going wrong at times... 
need to fix raise exc, e, tb = py.std.sys.exc_info() print '%s - %s' % (exc, e) print print ''.join(py.std.traceback.format_tb(tb)) print '-' * 79 del tb tag, nav = self.build_nonpython_page(fspath) elif fspath.check(dir=True): tag, nav = self.build_dir_page(fspath) else: tag, nav = self.build_nonpython_page(fspath) title = 'sources for %s' % (fspath.basename,) rev = self.get_revision(fspath) if rev: title += ' [rev. %s]' % (rev,) reltargetpath = outputpath.relto(self.base).replace(os.path.sep, '/') self.write_page(title, reltargetpath, tag, nav) _revcache = {} def get_revision(self, path): return get_package_revision(self.projroot) strpath = path.strpath if strpath in self._revcache: return self._revcache[strpath] wc = py.path.svnwc(path) if wc.check(versioned=True): rev = wc.info().created_rev else: rev = None self._revcache[strpath] = rev return rev class ApiPageBuilder(AbstractPageBuilder): """ builds the html for an api docs page """ def __init__(self, base, linker, dsa, projroot, namespace_tree, project, capture=None, pageclass=LayoutPage): self.base = base self.linker = linker self.dsa = dsa self.projroot = projroot self.projpath = py.path.local(projroot) self.namespace_tree = namespace_tree self.project = project self.capture = capture self.pageclass = pageclass pkgname = self.dsa.get_module_name().split('/')[-1] self.pkg = __import__(pkgname) def build_callable_view(self, dotted_name): """ build the html for a class method """ # XXX we may want to have seperate func = get_obj(self.dsa, self.pkg, dotted_name) docstring = func.__doc__ if docstring: docstring = deindent(docstring) localname = func.__name__ argdesc = get_param_htmldesc(self.linker, func) valuedesc = self.build_callable_signature_description(dotted_name) sourcefile = inspect.getsourcefile(func) callable_source = self.dsa.get_function_source(dotted_name) # i assume they're both either available or unavailable(XXX ?) 
is_in_pkg = self.is_in_pkg(sourcefile) href = None text = 'could not get to source file' colored = [] if sourcefile and callable_source: enc = source_html.get_module_encoding(sourcefile) tokenizer = source_color.Tokenizer(source_color.PythonSchema) firstlineno = func.func_code.co_firstlineno sep = get_linesep(callable_source) org = callable_source.split(sep) colored = [enumerate_and_color(org, firstlineno, enc)] relpath = get_rel_sourcepath(self.projroot, sourcefile, sourcefile) text = 'source: %s' % (relpath,) if is_in_pkg: href = self.linker.get_lazyhref(sourcefile) csource = H.SourceSnippet(text, href, colored) cslinks = self.build_callsites(dotted_name) snippet = H.FunctionDescription(localname, argdesc, docstring, valuedesc, csource, cslinks) return snippet def build_class_view(self, dotted_name): """ build the html for a class """ cls = get_obj(self.dsa, self.pkg, dotted_name) # XXX is this a safe check? try: sourcefile = inspect.getsourcefile(cls) except TypeError: sourcefile = None docstring = cls.__doc__ if docstring: docstring = deindent(docstring) if not hasattr(cls, '__name__'): clsname = 'instance of %s' % (cls.__class__.__name__,) else: clsname = cls.__name__ bases = self.build_bases(dotted_name) properties = self.build_properties(cls) methods = self.build_methods(dotted_name) if sourcefile is None: sourcelink = H.div('no source available') else: if sourcefile[-1] in ['o', 'c']: sourcefile = sourcefile[:-1] sourcelink = H.div(H.a('view source', href=self.linker.get_lazyhref(sourcefile))) snippet = H.ClassDescription( # XXX bases HTML H.ClassDef(clsname, bases, docstring, sourcelink, properties, methods), ) return snippet def build_bases(self, dotted_name): ret = [] bases = self.dsa.get_possible_base_classes(dotted_name) for base in bases: try: obj = self.dsa.get_obj(base.name) except KeyError: ret.append((base.name, None)) else: href = self.linker.get_lazyhref(base.name) ret.append((base.name, href)) return ret def build_properties(self, cls): 
properties = [] for attr in dir(cls): val = getattr(cls, attr) if show_property(attr) and not callable(val): if isinstance(val, property): val = '<property object (dynamically calculated value)>' properties.append((attr, val)) properties.sort(lambda x,y : cmp(x[0], y[0])) # sort on name return properties def build_methods(self, dotted_name): ret = [] methods = self.dsa.get_class_methods(dotted_name) # move all __*__ methods to the back methods = ([m for m in methods if not m.startswith('_')] + [m for m in methods if m.startswith('_')]) # except for __init__, which should be first if '__init__' in methods: methods.remove('__init__') methods.insert(0, '__init__') for method in methods: ret += self.build_callable_view('%s.%s' % (dotted_name, method)) return ret def build_namespace_view(self, namespace_dotted_name, item_dotted_names): """ build the html for a namespace (module) """ obj = get_obj(self.dsa, self.pkg, namespace_dotted_name) docstring = obj.__doc__ snippet = H.NamespaceDescription( H.NamespaceDef(namespace_dotted_name), H.Docstring(docstring or '*no docstring available*') ) for dotted_name in sorted(item_dotted_names): itemname = dotted_name.split('.')[-1] if (not is_navigateable(itemname) or self.is_hidden_from_nav(dotted_name)): continue snippet.append( H.NamespaceItem( H.a(itemname, href=self.linker.get_lazyhref(dotted_name) ) ) ) return snippet def build_class_pages(self, classes_dotted_names): passed = [] for dotted_name in sorted(classes_dotted_names): if self.capture: self.capture.err.writeorg('.') parent_dotted_name, _ = split_of_last_part(dotted_name) try: sibling_dotted_names = self.namespace_tree[parent_dotted_name] except KeyError: # no siblings (built-in module or sth) sibling_dotted_names = [] tag = H.Content(self.build_class_view(dotted_name)) nav = self.build_navigation(dotted_name, False) reltargetpath = "api/%s.html" % (dotted_name,) self.linker.set_link(dotted_name, reltargetpath) title = '%s API' % (dotted_name,) rev = 
self.get_revision(dotted_name) if rev: title += ' [rev. %s]' % (rev,) self.write_page(title, reltargetpath, tag, nav) return passed def build_function_pages(self, method_dotted_names): passed = [] for dotted_name in sorted(method_dotted_names): if self.capture: self.capture.err.writeorg('.') # XXX should we create a build_function_view instead? parent_dotted_name, _ = split_of_last_part(dotted_name) sibling_dotted_names = self.namespace_tree[parent_dotted_name] tag = H.Content(self.build_callable_view(dotted_name)) nav = self.build_navigation(dotted_name, False) reltargetpath = "api/%s.html" % (dotted_name,) self.linker.set_link(dotted_name, reltargetpath) title = '%s API' % (dotted_name,) rev = self.get_revision(dotted_name) if rev: title += ' [rev. %s]' % (rev,) self.write_page(title, reltargetpath, tag, nav) return passed def build_namespace_pages(self): passed = [] module_name = self.dsa.get_module_name().split('/')[-1] names = self.namespace_tree.keys() names.sort() function_names = self.dsa.get_function_names() class_names = self.dsa.get_class_names() for dotted_name in sorted(names): if self.capture: self.capture.err.writeorg('.') if dotted_name in function_names or dotted_name in class_names: continue subitem_dotted_names = self.namespace_tree[dotted_name] tag = H.Content(self.build_namespace_view(dotted_name, subitem_dotted_names)) nav = self.build_navigation(dotted_name, True) if dotted_name == '': reltargetpath = 'api/index.html' else: reltargetpath = 'api/%s.html' % (dotted_name,) self.linker.set_link(dotted_name, reltargetpath) title_name = dotted_name if dotted_name == '': title_name = self.dsa.get_module_name() title = 'index of %s' % (title_name,) rev = self.get_revision(dotted_name) if rev: title += ' [rev. 
%s]' % (rev,) self.write_page(title, reltargetpath, tag, nav) return passed def build_navigation(self, dotted_name, build_children=True): navitems = [] # top namespace, index.html module_name = self.dsa.get_module_name().split('/')[-1] navitems.append(H.NavigationItem(self.linker, '', module_name, 0, True)) def build_nav_level(dotted_name, depth=1): navitems = [] path = dotted_name.split('.')[:depth] siblings = self.namespace_tree.get('.'.join(path[:-1])) for dn in sorted(siblings): selected = dn == '.'.join(path) sibpath = dn.split('.') sibname = sibpath[-1] if not is_navigateable(sibname): continue if self.is_hidden_from_nav(dn): continue navitems.append(H.NavigationItem(self.linker, dn, sibname, depth, selected)) if selected: lastlevel = dn.count('.') == dotted_name.count('.') if not lastlevel: navitems += build_nav_level(dotted_name, depth+1) elif lastlevel and build_children: # XXX hack navitems += build_nav_level('%s.' % (dotted_name,), depth+1) return navitems navitems += build_nav_level(dotted_name) return H.Navigation(class_='sidebar', *navitems) def build_callable_signature_description(self, dotted_name): args, retval = self.dsa.get_function_signature(dotted_name) valuedesc = H.ValueDescList() for name, _type in args: valuedesc.append(self.build_sig_value_description(name, _type)) if retval: retval = self.process_type_link(retval) ret = H.div(H.div('arguments:'), valuedesc, H.div('return value:'), retval or 'None') return ret def build_sig_value_description(self, name, _type): l = self.process_type_link(_type) items = [] next = "%s: " % name for item in l: if isinstance(item, str): next += item else: if next: items.append(next) next = "" items.append(item) if next: items.append(next) return H.ValueDescItem(*items) def process_type_link(self, _type): # now we do simple type dispatching and provide a link in this case lst = [] data = self.dsa.get_type_desc(_type) if not data: for i in _type.striter(): if isinstance(i, str): lst.append(i) else: lst += 
self.process_type_link(i) return lst name, _desc_type, is_degenerated = data if not is_degenerated: linktarget = self.linker.get_lazyhref(name) lst.append(H.a(str(_type), href=linktarget)) else: raise IOError('do not think we ever get here?') # we should provide here some way of linking to sourcegen directly lst.append(name) return lst def is_in_pkg(self, sourcefile): return py.path.local(sourcefile).relto(self.projpath) _processed_callsites = {} def build_callsites(self, dotted_name): callstack = self.dsa.get_function_callpoints(dotted_name) cslinks = [] for i, (cs, _) in enumerate(callstack): if REDUCE_CALLSITES: key = (cs[0].filename, cs[0].lineno) if key in self._processed_callsites: # process one call site per line of test code when # REDUCE_CALLSITES is set to True continue self._processed_callsites[key] = 1 link = self.build_callsite(dotted_name, cs, i) cslinks.append(link) return cslinks def build_callsite(self, dotted_name, call_site, index): tbtag = H.Content(self.gen_traceback(dotted_name, reversed(call_site))) parent_dotted_name, _ = split_of_last_part(dotted_name) nav = self.build_navigation(parent_dotted_name, False) id = 'callsite_%s_%s' % (dotted_name, index) reltargetpath = "api/%s.html" % (id,) self.linker.set_link(id, reltargetpath) href = self.linker.get_lazyhref(id) self.write_page('call site %s for %s' % (index, dotted_name), reltargetpath, tbtag, nav) sourcefile = call_site[0].filename sourcepath = get_rel_sourcepath(self.projpath, sourcefile, sourcefile) return H.CallStackLink(sourcepath, call_site[0].lineno + 1, href) _reg_source = py.std.re.compile(r'([^>]*)<(.*)>') def gen_traceback(self, dotted_name, call_site): tbtag = H.CallStackDescription() for frame in call_site: lineno = frame.lineno - frame.firstlineno source = frame.source sourcefile = frame.filename tokenizer = source_color.Tokenizer(source_color.PythonSchema) mangled = [] source = str(source) sep = get_linesep(source) for i, sline in enumerate(source.split(sep)): if i == 
lineno: l = '-> %s' % (sline,) else: l = ' %s' % (sline,) mangled.append(l) if sourcefile: relpath = get_rel_sourcepath(self.projpath, sourcefile, sourcefile) linktext = '%s - line %s' % (relpath, frame.lineno + 1) # skip py.code.Source objects and source files outside of the # package is_code_source = self._reg_source.match(sourcefile) if (not is_code_source and self.is_in_pkg(sourcefile) and py.path.local(sourcefile).check()): enc = source_html.get_module_encoding(sourcefile) href = self.linker.get_lazyhref(sourcefile) sourcelink = H.a(linktext, href=href) else: enc = 'latin-1' sourcelink = H.div(linktext) colored = [enumerate_and_color(mangled, frame.firstlineno, enc)] else: sourcelink = H.div('source unknown (%s)' % (sourcefile,)) colored = mangled[:] tbtag.append(sourcelink) tbtag.append(H.div(*colored)) return tbtag def is_hidden_from_nav(self, dotted_name): obj = get_obj(self.dsa, self.pkg, dotted_name) return getattr(obj, '__apigen_hide_from_nav__', False) _revcache = {} def get_proj_revision(self): if '' in self._revcache: return self._revcache[''] wc = py.path.svnwc(self.projpath) if wc.check(versioned=True): rev = wc.info().created_rev else: rev = None self._revcache[''] = rev return rev def get_revision(self, dotted_name): return get_package_revision(self.projroot) if dotted_name in self._revcache: return self._revcache[dotted_name] obj = get_obj(self.dsa, self.pkg, dotted_name) rev = None try: sourcefile = inspect.getsourcefile(obj) except TypeError: pass else: if sourcefile is not None: if sourcefile[-1] in ['o', 'c']: sourcefile = sourcefile[:-1] wc = py.path.svnwc(sourcefile) if wc.check(versioned=True): rev = wc.info().created_rev rev = rev or self.get_proj_revision() self._revcache[dotted_name] = rev return rev
# Python
""" Generating ReST output (raw, not python) out of data that we know about function calls """ import py import sys import re from py.__.apigen.tracer.docstorage import DocStorageAccessor from py.__.rest.rst import * # XXX Maybe we should list it here from py.__.apigen.tracer import model from py.__.rest.transform import RestTransformer def split_of_last_part(name): name = name.split(".") return ".".join(name[:-1]), name[-1] class AbstractLinkWriter(object): """ Class implementing writing links to source code. There should exist various classes for that, different for Trac, different for CVSView, etc. """ def getlinkobj(self, obj, name): return None def getlink(self, filename, lineno, funcname): raise NotImplementedError("Abstract link writer") def getpkgpath(self, filename): # XXX: very simple thing path = py.path.local(filename).dirpath() while 1: try: path.join('__init__.py').stat() path = path.dirpath() except py.error.ENOENT: return path class ViewVC(AbstractLinkWriter): """ Link writer for ViewVC version control viewer """ def __init__(self, basepath): # XXX: should try to guess from a working copy of svn self.basepath = basepath def getlink(self, filename, lineno, funcname): path = str(self.getpkgpath(filename)) assert filename.startswith(path), ( "%s does not belong to package %s" % (filename, path)) relname = filename[len(path):] if relname.endswith('.pyc'): relname = relname[:-1] sep = py.std.os.sep if sep != '/': relname = relname.replace(sep, '/') return ('%s:%s' % (filename, lineno), self.basepath + relname[1:] + '?view=markup') class SourceView(AbstractLinkWriter): def __init__(self, baseurl): self.baseurl = baseurl if self.baseurl.endswith("/"): self.baseurl = baseurl[:-1] def getlink(self, filename, lineno, funcname): if filename.endswith('.pyc'): filename = filename[:-1] if filename is None: return "<UNKNOWN>:%s" % funcname,"" pkgpath = self.getpkgpath(filename) if not filename.startswith(str(pkgpath)): # let's leave it return "<UNKNOWN>:%s" % 
funcname,"" relname = filename[len(str(pkgpath)):] if relname.endswith('.pyc'): relname = relname[:-1] sep = py.std.os.sep if sep != '/': relname = relname.replace(sep, '/') return "%s:%s" % (relname, funcname),\ "%s%s#%s" % (self.baseurl, relname, funcname) def getlinkobj(self, name, obj): try: filename = sys.modules[obj.__module__].__file__ return self.getlink(filename, 0, name) except AttributeError: return None class DirectPaste(AbstractLinkWriter): """ No-link writer (inliner) """ def getlink(self, filename, lineno, funcname): return ('%s:%s' % (filename, lineno), "") class DirectFS(AbstractLinkWriter): """ Creates links to the files on the file system (for local docs) """ def getlink(self, filename, lineno, funcname): return ('%s:%s' % (filename, lineno), 'file://%s' % (filename,)) class PipeWriter(object): def __init__(self, output=sys.stdout): self.output = output def write_section(self, name, rest): text = "Contents of file %s.txt:" % (name,) self.output.write(text + "\n") self.output.write("=" * len(text) + "\n") self.output.write("\n") self.output.write(rest.text() + "\n") def getlink(self, type, targetname, targetfilename): return '%s.txt' % (targetfilename,) class DirWriter(object): def __init__(self, directory=None): if directory is None: self.directory = py.test.ensuretemp("rstoutput") else: self.directory = py.path.local(directory) def write_section(self, name, rest): filename = '%s.txt' % (name,) self.directory.ensure(filename).write(rest.text()) def getlink(self, type, targetname, targetfilename): # we assume the result will get converted to HTML... 
return '%s.html' % (targetfilename,) class FileWriter(object): def __init__(self, fpath): self.fpath = fpath self.fp = fpath.open('w+') self._defined_targets = [] def write_section(self, name, rest): self.fp.write(rest.text()) self.fp.flush() def getlink(self, type, targetname, targetbasename): # XXX problem: because of docutils' named anchor generation scheme, # a method Foo.__init__ would clash with Foo.init (underscores are # removed) if targetname in self._defined_targets: return None self._defined_targets.append(targetname) targetname = targetname.lower().replace('.', '-').replace('_', '-') while '--' in targetname: targetname = targetname.replace('--', '-') if targetname.startswith('-'): targetname = targetname[1:] if targetname.endswith('-'): targetname = targetname[:-1] return '#%s-%s' % (type, targetname) class HTMLDirWriter(object): def __init__(self, indexhandler, filehandler, directory=None): self.indexhandler = indexhandler self.filehandler = filehandler if directory is None: self.directory = py.test.ensuretemp('dirwriter') else: self.directory = py.path.local(directory) def write_section(self, name, rest): if name == 'index': handler = self.indexhandler else: handler = self.filehandler h = handler(name) t = RestTransformer(rest) t.parse(h) self.directory.ensure('%s.html' % (name,)).write(h.html) def getlink(self, type, targetname, targetfilename): return '%s.html' % (targetfilename,) class RestGen(object): def __init__(self, dsa, linkgen, writer=PipeWriter()): #assert isinstance(linkgen, DirectPaste), ( # "Cannot use different linkgen by now") self.dsa = dsa self.linkgen = linkgen self.writer = writer self.tracebacks = {} def write(self): """write the data to the writer""" modlist = self.get_module_list() classlist = self.get_class_list(module='') funclist = self.get_function_list() modlist.insert(0, ['', classlist, funclist]) indexrest = self.build_index([t[0] for t in modlist]) self.writer.write_section('index', Rest(*indexrest)) 
self.build_modrest(modlist) def build_modrest(self, modlist): modrest = self.build_modules(modlist) for name, rest, classlist, funclist in modrest: mname = name if mname == '': mname = self.dsa.get_module_name() self.writer.write_section('module_%s' % (mname,), Rest(*rest)) for cname, crest, cfunclist in classlist: self.writer.write_section('class_%s' % (cname,), Rest(*crest)) for fname, frest, tbdata in cfunclist: self.writer.write_section('method_%s' % (fname,), Rest(*frest)) for tbname, tbrest in tbdata: self.writer.write_section('traceback_%s' % (tbname,), Rest(*tbrest)) for fname, frest, tbdata in funclist: self.writer.write_section('function_%s' % (fname,), Rest(*frest)) for tbname, tbrest in tbdata: self.writer.write_section('traceback_%s' % (tbname,), Rest(*tbrest)) def build_classrest(self, classlist): classrest = self.build_classes(classlist) for cname, rest, cfunclist in classrest: self.writer.write_section('class_%s' % (cname,), Rest(*rest)) for fname, rest in cfunclist: self.writer.write_section('method_%s' % (fname,), Rest(*rest)) def build_funcrest(self, funclist): funcrest = self.build_functions(funclist) for fname, rest, tbdata in funcrest: self.writer.write_section('function_%s' % (fname,), Rest(*rest)) for tbname, tbrest in tbdata: self.writer.write_section('traceback_%s' % (tbname,), Rest(*tbrest)) def build_index(self, modules): rest = [Title('index', abovechar='=', belowchar='=')] rest.append(Title('exported modules:', belowchar='=')) for module in modules: mtitle = module if module == '': module = self.dsa.get_module_name() mtitle = '%s (top-level)' % (module,) linktarget = self.writer.getlink('module', module, 'module_%s' % (module,)) rest.append(ListItem(Link(mtitle, linktarget))) return rest def build_modules(self, modules): ret = [] for module, classes, functions in modules: mname = module if mname == '': mname = self.dsa.get_module_name() rest = [Title('module: %s' % (mname,), abovechar='=', belowchar='='), Title('index:', 
belowchar='=')] if classes: rest.append(Title('classes:', belowchar='^')) for cls, bases, cfunclist in classes: linktarget = self.writer.getlink('class', cls, 'class_%s' % (cls,)) rest.append(ListItem(Link(cls, linktarget))) classrest = self.build_classes(classes) if functions: rest.append(Title('functions:', belowchar='^')) for func in functions: if module: func = '%s.%s' % (module, func) linktarget = self.writer.getlink('function', func, 'function_%s' % (func,)) rest.append(ListItem(Link(func, linktarget))) funcrest = self.build_functions(functions, module, False) ret.append((module, rest, classrest, funcrest)) return ret def build_classes(self, classes): ret = [] for cls, bases, functions in classes: rest = [Title('class: %s' % (cls,), belowchar='='), LiteralBlock(self.dsa.get_doc(cls))] # link to source link_to_class = self.linkgen.getlinkobj(cls, self.dsa.get_obj(cls)) if link_to_class: rest.append(Paragraph(Text("source: "), Link(*link_to_class))) if bases: rest.append(Title('base classes:', belowchar='^')), for base in bases: rest.append(ListItem(self.make_class_link(base))) if functions: rest.append(Title('functions:', belowchar='^')) for (func, origin) in functions: linktarget = self.writer.getlink('method', '%s.%s' % (cls, func), 'method_%s.%s' % (cls, func)) rest.append(ListItem(Link('%s.%s' % (cls, func), linktarget))) funcrest = self.build_functions(functions, cls, True) ret.append((cls, rest, funcrest)) return ret def build_functions(self, functions, parent='', methods=False): ret = [] for function in functions: origin = None if methods: function, origin = function if parent: function = '%s.%s' % (parent, function) rest, tbrest = self.write_function(function, origin=origin, ismethod=methods) ret.append((function, rest, tbrest)) return ret def get_module_list(self): visited = [] ret = [] for name in self.dsa.get_class_names(): if '.' 
in name: module, classname = split_of_last_part(name) if module in visited: continue visited.append(module) ret.append((module, self.get_class_list(module), self.get_function_list(module))) return ret def get_class_list(self, module): ret = [] for name in self.dsa.get_class_names(): classname = name if '.' in name: classmodule, classname = split_of_last_part(name) if classmodule != module: continue elif module != '': continue bases = self.dsa.get_possible_base_classes(name) ret.append((name, bases, self.get_method_list(name))) return ret def get_function_list(self, module=''): ret = [] for name in self.dsa.get_function_names(): funcname = name if '.' in name: funcpath, funcname = split_of_last_part(name) if funcpath != module: continue elif module != '': continue ret.append(funcname) return ret def get_method_list(self, classname): methodnames = self.dsa.get_class_methods(classname) return [(mn, self.dsa.get_method_origin('%s.%s' % (classname, mn))) for mn in methodnames] def process_type_link(self, _type): # now we do simple type dispatching and provide a link in this case lst = [] data = self.dsa.get_type_desc(_type) if not data: for i in _type.striter(): if isinstance(i, str): lst.append(i) else: lst += self.process_type_link(i) return lst name, _desc_type, is_degenerated = data if not is_degenerated: linktarget = self.writer.getlink(_desc_type, name, '%s_%s' % (_desc_type, name)) lst.append(Link(str(_type), linktarget)) else: # we should provide here some way of linking to sourcegen directly lst.append(name) return lst def write_function(self, functionname, origin=None, ismethod=False, belowchar='-'): # XXX I think the docstring should either be split on \n\n and cleaned # from indentation, or treated as ReST too (although this is obviously # dangerous for non-ReST docstrings)... 
if ismethod: title = Title('method: %s' % (functionname,), belowchar=belowchar) else: title = Title('function: %s' % (functionname,), belowchar=belowchar) lst = [title, LiteralBlock(self.dsa.get_doc(functionname)), LiteralBlock(self.dsa.get_function_definition(functionname))] link_to_function = self.linkgen.getlinkobj(functionname, self.dsa.get_obj(functionname)) if link_to_function: lst.insert(1, Paragraph(Text("source: "), Link(*link_to_function))) opar = Paragraph(Strong('origin'), ":") if origin: opar.add(self.make_class_link(origin)) else: opar.add(Text('<UNKNOWN>')) lst.append(opar) lst.append(Paragraph(Strong("where"), ":")) args, retval = self.dsa.get_function_signature(functionname) for name, _type in args + [('return value', retval)]: l = self.process_type_link(_type) items = [] next = "%s :: " % name for item in l: if isinstance(item, str): next += item else: if next: items.append(Text(next)) next = "" items.append(item) if next: items.append(Text(next)) lst.append(ListItem(*items)) local_changes = self.dsa.get_function_local_changes(functionname) if local_changes: lst.append(Paragraph(Strong('changes in __dict__ after execution'), ":")) for k, changeset in local_changes.iteritems(): lst.append(ListItem('%s: %s' % (k, ', '.join(changeset)))) exceptions = self.dsa.get_function_exceptions(functionname) if exceptions: lst.append(Paragraph(Strong('exceptions that might appear during ' 'execution'), ":")) for exc in exceptions: lst.append(ListItem(exc)) # XXX: right now we leave it alone # XXX missing implementation of dsa.get_function_location() #filename, lineno = self.dsa.get_function_location(functionname) #linkname, linktarget = self.linkgen.getlink(filename, lineno) #if linktarget: # lst.append(Paragraph("Function source: ", # Link(linkname, linktarget))) #else: source = self.dsa.get_function_source(functionname) if source: lst.append(Paragraph(Strong('function source'), ":")) lst.append(LiteralBlock(source)) # call sites.. 
call_sites = self.dsa.get_function_callpoints(functionname) tbrest = [] if call_sites: call_site_title = Title("call sites:", belowchar='+') lst.append(call_site_title) # we have to think differently here. I would go for: # 1. A quick'n'dirty statement where call has appeared first # (topmost) # 2. Link to short traceback # 3. Link to long traceback for call_site, _ in call_sites: fdata, tbdata = self.call_site_link(functionname, call_site) lst += fdata tbrest.append(tbdata) return lst, tbrest def call_site_link(self, functionname, call_site): tbid, tbrest = self.gen_traceback(functionname, call_site) tbname = '%s.%s' % (functionname, tbid) linktarget = self.writer.getlink('traceback', tbname, 'traceback_%s' % (tbname,)) frest = [Paragraph("called in %s" % call_site[0].filename), Paragraph(Link("traceback %s" % (tbname,), linktarget))] return frest, (tbname, tbrest) def gen_traceback(self, funcname, call_site): tbid = len(self.tracebacks.setdefault(funcname, [])) self.tracebacks[funcname].append(call_site) tbrest = [Title('traceback for %s' % (funcname,))] for line in call_site: lineno = line.lineno - line.firstlineno linkname, linktarget = self.linkgen.getlink(line.filename, line.lineno + 1, funcname) if linktarget: tbrest.append(Paragraph(Link(linkname, linktarget))) else: tbrest.append(Paragraph(linkname)) try: source = line.source except IOError: source = "*cannot get source*" mangled = [] for i, sline in enumerate(str(source).split('\n')): if i == lineno: line = '-> %s' % (sline,) else: line = ' %s' % (sline,) mangled.append(line) tbrest.append(LiteralBlock('\n'.join(mangled))) return tbid, tbrest def make_class_link(self, desc): if not desc or desc.is_degenerated: # create dummy link here, or no link at all return Strong(desc.name) else: linktarget = self.writer.getlink('class', desc.name, 'class_%s' % (desc.name,)) return Link(desc.name, linktarget)
Python
# HTML page handlers for apigen output, built on the py-lib ReST->HTML
# transform.  PageHandler renders one documentation page (with a breadcrumb
# trail); IndexHandler renders the frameset-style index page with a sidebar.
# NOTE(review): 'entitize' is imported but not used in this chunk --
# presumably re-exported or used elsewhere in the file; confirm.
from py.__.rest.transform import HTMLHandler, entitize
from py.xml import html, raw


class PageHandler(HTMLHandler):
    # Content page: adds the stylesheet link and a breadcrumb div derived
    # from the page title, and makes links open in the 'content' frame.

    def startDocument(self):
        super(PageHandler, self).startDocument()
        self.head.append(html.link(type='text/css', rel='stylesheet',
                                   href='style.css'))
        # first title of the document encodes the page identity,
        # e.g. 'method_Module.Class.name' (see breadcrumb below)
        title = self.title[0]
        breadcrumb = ''.join([unicode(el) for el in self.breadcrumb(title)])
        self.body.append(html.div(raw(breadcrumb), class_='breadcrumb'))

    def handleLink(self, text, target):
        # all in-page links target the 'content' iframe of the index page
        self.tagstack[-1].append(html.a(text, href=target, target='content'))

    def breadcrumb(self, title):
        """Yield anchors (and u'.' separators) forming the breadcrumb trail.

        ``title`` has the shape '<type>_<dotted.path>' where <type> is one
        of 'module', 'class', 'method'; anything else is treated as a
        function.  The 'index' title produces no breadcrumb at all.
        """
        if title != 'index':
            type, path = title.split('_', 1)
            path = path.split('.')
            module = None
            cls = None
            func = None
            meth = None
            # split the dotted path into module / class / member parts
            # depending on what kind of page this is
            if type == 'module':
                module = '.'.join(path)
            elif type == 'class':
                module = '.'.join(path[:-1])
                cls = path[-1]
            elif type == 'method':
                module = '.'.join(path[:-2])
                cls = path[-2]
                meth = path[-1]
            else:
                # functions (and anything unrecognized)
                module = '.'.join(path[:-1])
                func = path[-1]
            if module:
                yield html.a(module, href='module_%s.html' % (module,))
                if type != 'module':
                    yield u'.'
            if cls:
                # class link target is fully qualified when a module
                # prefix is known
                s = cls
                if module:
                    s = '%s.%s' % (module, cls)
                yield html.a(cls, href='class_%s.html' % (s,))
                if type != 'class':
                    yield u'.'
            if meth:
                s = '%s.%s' % (cls, meth)
                if module:
                    s = '%s.%s.%s' % (module, cls, meth)
                yield html.a(meth, href='method_%s.html' % (s,))
            if func:
                s = func
                if module:
                    s = '%s.%s' % (module, func)
                yield html.a(func, href='function_%s.html' % (s,))


class IndexHandler(PageHandler):
    # Index page: sidebar with the module tree plus an iframe showing the
    # selected content page.  Title text is suppressed on this page.
    ignore_text = False

    def startDocument(self):
        super(IndexHandler, self).startDocument()
        self.head.append(html.script(type='text/javascript',
                                     src='apigen.js'))
        self._push(html.div(id='sidebar'))

    def endDocument(self):
        maindiv = html.div(id="main")
        maindiv.append(html.div(id="breadcrumb"))
        # initial content shown in the frame is the 'py' module page
        maindiv.append(html.iframe(name='content', id='content',
                                   src='module_py.html'))
        self.body.append(maindiv)

    def startTitle(self, depth):
        # titles are rendered by the sidebar tree itself; drop their text
        self.ignore_text = True

    def endTitle(self, depth):
        self.ignore_text = False

    def handleText(self, text):
        if self.ignore_text:
            return
        super(IndexHandler, self).handleText(text)
Python
# Apparently a sample module traced by the apigen tracer tests -- its
# docstrings and signatures are read back via get_doc()/get_function_
# signature(); keep them stable.  TODO confirm against the test suite.

class SomeClass(object):
    """Some class definition"""

    def __init__(self, a):
        # single constructor argument stored as an instance attribute
        self.a = a

    def method(self, a, b, c):
        """method docstring"""
        # '+' keeps this polymorphic (ints, strings, ...) so the tracer
        # can observe different argument types
        return a + b + c
Python
# Apparently a companion sample module to 'somemodule' for the apigen
# tracer tests: exercises subclassing, and a docstring containing ReST
# markup.  TODO confirm against the test suite.
from somemodule import SomeClass


class SomeSubClass(SomeClass):
    """Some subclass definition"""

    # NOTE(review): 'fun' takes no 'self' parameter -- presumably an
    # intentional fixture for the tracer's static-method/odd-signature
    # handling; confirm before "fixing".
    def fun(a, b, c):
        """Some docstring

        Let's make it span a couple of lines to be interesting...

        Note:

          * rest
          * should
          * be
          * supported
          * or
          * ignored...
        """
        return "d"
Python
import py
from py.__.apigen.tracer import model
from py.__.code.source import getsource
import types
import inspect
import copy

# cap on recorded call sites per function, to bound memory use while tracing
MAX_CALL_SITES = 20

set = py.builtin.set


def is_private(name):
    """True for single-underscore names, but not for __dunder__ names."""
    return name.startswith('_') and not name.startswith('__')


class CallFrame(object):
    """Lightweight, hashable snapshot of one interpreter frame.

    Only the data needed to render a traceback later (filename, line
    numbers, source text) is kept, so the real frame can be released.
    """

    def __init__(self, frame):
        self.filename = frame.code.raw.co_filename
        self.lineno = frame.lineno
        self.firstlineno = frame.code.firstlineno
        try:
            self.source = getsource(frame.code.raw)
        except IOError:
            self.source = "could not get to source"

    def _getval(self):
        # identity used for deduplicating call sites
        return (self.filename, self.lineno)

    def __hash__(self):
        return hash(self._getval())

    def __eq__(self, other):
        return self._getval() == other._getval()

    def __ne__(self, other):
        return not self == other


class CallStack(object):
    """A hashable sequence of CallFrames describing one call site."""

    def __init__(self, tb):
        self.tb = [CallFrame(frame) for frame in tb]

    def __hash__(self):
        return hash(tuple(self.tb))

    def __eq__(self, other):
        return self.tb == other.tb

    def __ne__(self, other):
        return not self == other

    def __iter__(self):
        return iter(self.tb)

    def __getitem__(self, item):
        return self.tb[item]

    def __len__(self):
        return len(self.tb)

    def __cmp__(self, other):
        return cmp(self.tb, other.tb)


def cut_stack(stack, frame, upward_frame=None):
    """Build a CallStack from ``frame`` up to (excluding) ``upward_frame``.

    ``stack`` is a list of raw frames; ``frame`` and ``upward_frame`` may
    be py.code.Frame wrappers, in which case the raw frame is extracted.
    """
    if hasattr(frame, 'raw'):
        frame = frame.raw
    if upward_frame:
        if hasattr(upward_frame, 'raw'):
            upward_frame = upward_frame.raw
        lst = [py.code.Frame(i) for i in
               stack[stack.index(frame):stack.index(upward_frame) + 1]]
        if len(lst) > 1:
            # drop the upward frame itself
            return CallStack(lst[:-1])
        return CallStack(lst)
    return CallStack([py.code.Frame(i) for i in stack[stack.index(frame):]])


class NonHashableObject(object):
    """Placeholder 'code' attribute that refuses to be hashed.

    Plain Desc instances have no real code object; hashing one is a
    programming error and should fail loudly.
    """

    def __init__(self, cls):
        self.cls = cls

    def __hash__(self):
        raise NotImplementedError("Object of type %s are unhashable" %
                                  self.cls)


class Desc(object):
    """Base description of a traced object (function, class, method, ...).

    Descriptions hash/compare via their ``code`` object so that a raw
    code object (or a ``(code, class)`` tuple) can be used for lookup.
    We make a new base class instead of using pypy's one because of type
    restrictions of pypy descs.
    """

    def __init__(self, name, pyobj, **kwargs):
        self.pyobj = pyobj
        self.is_degenerated = False
        self.name = name
        if type(self) is Desc:
            # do not override the 'code' property of subclasses; a plain
            # Desc gets a dummy object that makes it unhashable
            self.code = NonHashableObject(self.__class__)

    def __hash__(self):
        return hash(self.code)

    def __eq__(self, other):
        if isinstance(other, Desc):
            return self.code == other.code
        if isinstance(other, types.CodeType):
            return self.code == other
        if isinstance(other, tuple) and len(other) == 2:
            return self.code == other
        return False

    def __ne__(self, other):
        return not self == other


class FunctionDesc(Desc):
    """Description of a plain function: argument/return types, call sites
    and raised exceptions observed while tracing."""

    def __init__(self, *args, **kwargs):
        super(FunctionDesc, self).__init__(*args, **kwargs)
        self.inputcells = [model.s_ImpossibleValue
                           for i in xrange(self.code.co_argcount)]
        self.call_sites = {}
        self.keep_frames = kwargs.get('keep_frames', False)
        self.frame_copier = kwargs.get('frame_copier', lambda x: x)
        self.retval = model.s_ImpossibleValue
        self.exceptions = {}

    def consider_call(self, inputcells):
        # widen each recorded argument type with what this call supplied
        for cell_num, cell in enumerate(inputcells):
            self.inputcells[cell_num] = model.unionof(
                cell, self.inputcells[cell_num])

    def consider_call_site(self, frame, cut_frame):
        if len(self.call_sites) > MAX_CALL_SITES:
            return
        stack = [i[0] for i in inspect.stack()]
        cs = cut_stack(stack, frame, cut_frame)
        self.call_sites[cs] = cs

    def consider_exception(self, exc, value):
        self.exceptions[exc] = True

    def get_call_sites(self):
        # convenient accessor for various data which we keep there
        if not self.keep_frames:
            return [(key, val) for key, val in self.call_sites.iteritems()]
        else:
            lst = []
            for key, val in self.call_sites.iteritems():
                for frame in val:
                    lst.append((key, frame))
            return lst

    def consider_return(self, arg):
        self.retval = model.unionof(arg, self.retval)

    def consider_start_locals(self, frame):
        pass

    def consider_end_locals(self, frame):
        pass

    def getcode(self):
        return self.pyobj.func_code
    code = property(getcode)

    def get_local_changes(self):
        # plain functions do not track instance-__dict__ changes
        return {}


class ClassDesc(Desc):
    """Description of a class; gathers MethodDescs in ``fields`` and
    forwards call/exception events to the __init__ method desc."""

    def __init__(self, *args, **kwargs):
        super(ClassDesc, self).__init__(*args, **kwargs)
        self.fields = {}
        # we'll gather informations about methods and possibly
        # other variables encountered here

    def getcode(self):
        # This is a hack. We're trying to return something as close to
        # our __init__ as possible, but still a hashable object.
        if hasattr(self.pyobj, '__init__'):
            if hasattr(self.pyobj.__init__, 'im_func') and \
               hasattr(self.pyobj.__init__.im_func, 'func_code'):
                result = self.pyobj.__init__.im_func.func_code
            else:
                result = self.pyobj.__init__
        else:
            result = self.pyobj
        try:
            hash(result)
        except (KeyboardInterrupt, SystemExit):
            # BUGFIX: was 'except KeyboardInterrupt, SystemExit:', which
            # in Python 2 catches only KeyboardInterrupt and *binds* it to
            # the name SystemExit; the tuple form re-raises both.
            raise
        except:
            # XXX bare except: user-defined __hash__ may raise anything
            try:
                hash(self.pyobj)
                result = self.pyobj
            except:
                result = self
        return result
    code = property(getcode)

    def consider_call(self, inputcells):
        # a class call is really a call of its __init__
        if '__init__' in self.fields:
            md = self.fields['__init__']
        else:
            md = MethodDesc(self.name + '.__init__', self.pyobj.__init__)
            self.fields['__init__'] = md
        md.consider_call(inputcells)

    def consider_return(self, arg):
        pass  # we *know* what return value we do have

    def consider_exception(self, exc, value):
        if '__init__' in self.fields:
            md = self.fields['__init__']
        else:
            md = MethodDesc(self.name + '.__init__', self.pyobj.__init__)
            self.fields['__init__'] = md
        md.consider_exception(exc, value)

    def consider_start_locals(self, frame):
        if '__init__' in self.fields:
            md = self.fields['__init__']
            md.consider_start_locals(frame)

    def consider_end_locals(self, frame):
        if '__init__' in self.fields:
            md = self.fields['__init__']
            md.consider_end_locals(frame)

    def consider_call_site(self, frame, cut_frame):
        self.fields['__init__'].consider_call_site(frame, cut_frame)

    def add_method_desc(self, name, methoddesc):
        self.fields[name] = methoddesc

    def getfields(self):
        # return the (public) fields that have been used
        l = [i for i, v in self.fields.iteritems() if not is_private(i)]
        return l

    def getbases(self):
        # transitive closure of base classes, excluding the class itself
        bases = []
        tovisit = [self.pyobj]
        while tovisit:
            current = tovisit.pop()
            if current is not self.pyobj:
                bases.append(current)
            tovisit += [b for b in current.__bases__ if b not in bases]
        return bases
    bases = property(getbases)


class MethodDesc(FunctionDesc):
    """Description of a method; additionally tracks the changes a call
    makes to the instance __dict__ (created/changed/deleted keys)."""

    def __init__(self, *args, **kwargs):
        super(MethodDesc, self).__init__(*args, **kwargs)
        self.old_dict = {}
        self.changeset = {}

    def getcode(self):
        return self.pyobj.im_func.func_code
    code = property(getcode)

    def __hash__(self):
        # key on (code, class): the same function shared by two classes
        # yields two distinct method descs
        return hash((self.code, self.pyobj.im_class))

    def __eq__(self, other):
        if isinstance(other, tuple):
            return self.code is other[0] and self.pyobj.im_class is other[1]
        if isinstance(other, MethodDesc):
            return self.pyobj is other.pyobj
        return False

    def consider_start_locals(self, frame):
        # XXX recursion issues?
        # snapshot the instance __dict__ before the call runs
        obj = frame.f_locals[self.pyobj.im_func.func_code.co_varnames[0]]
        try:
            if not obj:  # static method
                return
        except AttributeError:
            return
        self.old_dict = self.perform_dict_copy(obj.__dict__)

    def perform_dict_copy(self, d):
        if d is None:
            return {}
        return d.copy()

    def consider_end_locals(self, frame):
        obj = frame.f_locals[self.pyobj.im_func.func_code.co_varnames[0]]
        try:
            if not obj:  # static method
                return
        except AttributeError:
            return
        # store the local changes: update self.changeset
        self.update_changeset(obj.__dict__)

    def get_local_changes(self):
        return self.changeset

    def set_changeset(changeset, key, value):
        if key not in changeset:
            changeset[key] = set([value])
        else:
            changeset[key].add(value)
    set_changeset = staticmethod(set_changeset)

    def update_changeset(self, new_dict):
        # classify every key as deleted / changed / created relative to
        # the snapshot taken in consider_start_locals()
        changeset = self.changeset
        for k, v in self.old_dict.iteritems():
            if k not in new_dict:
                self.set_changeset(changeset, k, "deleted")
            elif new_dict[k] != v:
                self.set_changeset(changeset, k, "changed")
        for k, v in new_dict.iteritems():
            if k not in self.old_dict:
                self.set_changeset(changeset, k, "created")
        return changeset
Python
# Picklable snapshot of a DocStorageAccessor: everything is copied into
# plain placeholder objects so the result survives pickling without the
# original traced objects.
import py


class DescPlaceholder(object):
    # plain attribute bag for one saved function/method
    pass


class ClassPlaceholder(object):
    # plain attribute bag for one saved class
    pass


class SerialisableClassDesc(object):
    # minimal, picklable stand-in for a ClassDesc (name + degenerated flag)
    def __init__(self, original_desc):
        self.is_degenerated = original_desc.is_degenerated
        self.name = original_desc.name


class PermaDocStorage(object):
    """ Picklable version of docstorageaccessor """

    # per-function accessor fields mirrored from the live accessor; a
    # get_function_<field>() method is generated for each one below
    function_fields = ['source', 'signature', 'definition', 'callpoints',
                       'local_changes', 'exceptions']

    def __init__(self, dsa):
        """ Initialise from original doc storage accessor """
        self.names = {}
        self.module_info = dsa.get_module_info()
        self.module_name = dsa.get_module_name()
        self._save_functions(dsa)
        self._save_classes(dsa)

    def _save_functions(self, dsa):
        names = dsa.get_function_names()
        self.function_names = names
        for name in names:
            self._save_function(dsa, name)

    def _save_function(self, dsa, name):
        # copy doc + every function_fields entry into a placeholder
        ph = DescPlaceholder()
        ph.__doc__ = dsa.get_doc(name)
        for field in self.function_fields:
            setattr(ph, field, getattr(dsa, 'get_function_%s' % field)(name))
        self.names[name] = ph
        return ph

    def _save_classes(self, dsa):
        names = dsa.get_class_names()
        self.class_names = names
        for name in names:
            ph = ClassPlaceholder()
            ph.__doc__ = dsa.get_doc(name)
            methods = dsa.get_class_methods(name)
            ph.methods = methods
            ph.base_classes = [SerialisableClassDesc(i) for i in
                               dsa.get_possible_base_classes(name)]
            for method in methods:
                # methods are stored under their dotted 'Class.method' name
                method_name = name + "." + method
                mh = self._save_function(dsa, name + "." + method)
                mh.origin = SerialisableClassDesc(dsa.get_method_origin(
                    method_name))
            self.names[name] = ph

    def get_class_methods(self, name):
        desc = self.names[name]
        assert isinstance(desc, ClassPlaceholder)
        return desc.methods

    def get_doc(self, name):
        return self.names[name].__doc__

    def get_module_info(self):
        return self.module_info

    def get_module_name(self):
        return self.module_name

    def get_class_names(self):
        return self.class_names

    def get_function_names(self):
        return self.function_names

    def get_method_origin(self, name):
        # returns a DESCRIPTION of a method origin, to make sure where we
        # write it
        return self.names[name].origin

    def get_possible_base_classes(self, name):
        # returns list of descs of base classes
        return self.names[name].base_classes

    # This are placeholders to provide something more reliable
    def get_type_desc(self, _type):
        return None

    #def get_obj(self, name): # This is quite hairy, get rid of it soon
    #    # returns a pyobj
    #    pass


# Generate one get_function_<field>(self, name) accessor per entry in
# function_fields; each simply reads the saved placeholder attribute.
for field in PermaDocStorage.function_fields:
    d = {"field": field}
    func_name = "get_function_%s" % (field, )
    exec py.code.Source("""
        def %s(self, name, field=field):
            return getattr(self.names[name], field)
    """ % (func_name, )).compile() in d
    setattr(PermaDocStorage, func_name, d[func_name])
Python
""" simple tracer for API generation """ import py import sys import types from py.__.apigen.tracer.description import FunctionDesc from py.__.apigen.tracer.docstorage import DocStorage class UnionError(Exception): pass class NoValue(object): pass class Tracer(object): """ Basic tracer object, used for gathering additional info about API functions """ def __init__(self, docstorage): self.docstorage = docstorage self.tracing = False _locals = {} def _tracer(self, frame, event, arg): # perform actuall tracing frame = py.code.Frame(frame) if event == 'call': assert arg is None try: self.docstorage.consider_call(frame, py.code.Frame(sys._getframe(2)), self.frame) except ValueError: self.docstorage.consider_call(frame, None, self.frame) elif event == 'return': self.docstorage.consider_return(frame, arg) elif event == 'exception': self.docstorage.consider_exception(frame, arg) return self._tracer def start_tracing(self): if self.tracing: return self.tracing = True self.frame = py.code.Frame(sys._getframe(1)) sys.settrace(self._tracer) def end_tracing(self): self.tracing = False sys.settrace(None)
Python
""" model - type system model for apigen """ # we implement all the types which are in the types.*, naming # scheme after pypy's import py import types set = py.builtin.set # __extend__ and pairtype? class SomeObject(object): typedef = types.ObjectType def __repr__(self): return "<%s>" % self.__class__.__name__[4:] return str(self.typedef)[7:-2] def unionof(self, other): if isinstance(other, SomeImpossibleValue): return self if isinstance(other, SomeUnion): return other.unionof(self) if self == other: return self return SomeUnion([self, other]) def gettypedef(self): return self.typedef def __hash__(self): return hash(self.__class__) def __eq__(self, other): return self.__class__ == other.__class__ def __ne__(self, other): return not self == other # this is to provide possibility of eventually linking some stuff def striter(self): yield str(self) class SomeUnion(object): # empty typedef def __init__(self, possibilities): self.possibilities = set(possibilities) def unionof(self, other): if isinstance(other, SomeUnion): return SomeUnion(self.possibilities.union(other.possibilities)) return SomeUnion(list(self.possibilities) + [other]) def __eq__(self, other): if type(other) is not SomeUnion: return False return self.possibilities == other.possibilities def __ne__(self, other): return not self == other def __repr__(self): return "AnyOf(%s)" % ", ".join([str(i) for i in list(self.possibilities)]) def gettypedef(self): return (None, None) def striter(self): yield "AnyOf(" for num, i in enumerate(self.possibilities): yield i if num != len(self.possibilities) - 1: yield ", " yield ")" class SomeBoolean(SomeObject): typedef = types.BooleanType class SomeBuffer(SomeObject): typedef = types.BufferType class SomeBuiltinFunction(SomeObject): typedef = types.BuiltinFunctionType #class SomeBuiltinMethod(SomeObject): # typedef = types.BuiltinMethodType class SomeClass(SomeObject): typedef = types.ClassType def __init__(self, cls): self.cls = cls self.name = cls.__name__ self.id = 
id(cls) def __getstate__(self): return (self.name, self.id) def __setstate__(self, state): self.name, self.id = state self.cls = None def __hash__(self): return hash("Class") ^ hash(self.id) def __eq__(self, other): if type(other) is not SomeClass: return False return self.id == other.id def unionof(self, other): if type(other) is not SomeClass or self.id is not other.id: return super(SomeClass, self).unionof(other) return self def __repr__(self): return "Class %s" % self.name class SomeCode(SomeObject): typedef = types.CodeType class SomeComplex(SomeObject): typedef = types.ComplexType class SomeDictProxy(SomeObject): typedef = types.DictProxyType class SomeDict(SomeObject): typedef = types.DictType class SomeEllipsis(SomeObject): typedef = types.EllipsisType class SomeFile(SomeObject): typedef = types.FileType class SomeFloat(SomeObject): typedef = types.FloatType class SomeFrame(SomeObject): typedef = types.FrameType class SomeFunction(SomeObject): typedef = types.FunctionType class SomeGenerator(SomeObject): typedef = types.GeneratorType class SomeInstance(SomeObject): def __init__(self, classdef): self.classdef = classdef def __hash__(self): return hash("SomeInstance") ^ hash(self.classdef) def __eq__(self, other): if type(other) is not SomeInstance: return False return other.classdef == self.classdef def unionof(self, other): if type(other) is not SomeInstance: return super(SomeInstance, self).unionof(other) if self.classdef == other.classdef: return self return SomeInstance(unionof(self.classdef, other.classdef)) def __repr__(self): return "<Instance of %s>" % str(self.classdef) def striter(self): yield "<Instance of " yield self.classdef yield ">" typedef = types.InstanceType class SomeInt(SomeObject): typedef = types.IntType class SomeLambda(SomeObject): typedef = types.LambdaType class SomeList(SomeObject): typedef = types.ListType class SomeLong(SomeObject): typedef = types.LongType class SomeMethod(SomeObject): typedef = types.MethodType class 
SomeModule(SomeObject): typedef = types.ModuleType class SomeNone(SomeObject): typedef = types.NoneType class SomeNotImplemented(SomeObject): typedef = types.NotImplementedType class SomeObject(SomeObject): typedef = types.ObjectType class SomeSlice(SomeObject): typedef = types.SliceType class SomeString(SomeObject): typedef = types.StringType class SomeTraceback(SomeObject): typedef = types.TracebackType class SomeTuple(SomeObject): typedef = types.TupleType class SomeType(SomeObject): typedef = types.TypeType class SomeUnboundMethod(SomeObject): typedef = types.UnboundMethodType class SomeUnicode(SomeObject): typedef = types.UnicodeType class SomeXRange(SomeObject): typedef = types.XRangeType class SomeImpossibleValue(SomeObject): def unionof(self, other): return other def __repr__(self): return "<UNKNOWN>" s_ImpossibleValue = SomeImpossibleValue() s_None = SomeNone() s_Ellipsis = SomeEllipsis() def guess_type(x): # this is mostly copy of immutablevalue if hasattr(x, 'im_self') and x.im_self is None: x = x.im_func assert not hasattr(x, 'im_self') tp = type(x) if tp is bool: result = SomeBoolean() elif tp is int: result = SomeInt() elif issubclass(tp, str): result = SomeString() elif tp is unicode: result = SomeUnicode() elif tp is tuple: result = SomeTuple() #result = SomeTuple(items = [self.immutablevalue(e, need_const) for e in x]) elif tp is float: result = SomeFloat() elif tp is list: #else: # listdef = ListDef(self, s_ImpossibleValue) # for e in x: # listdef.generalize(self.annotation_from_example(e)) result = SomeList() elif tp is dict: ## dictdef = DictDef(self, ## s_ImpossibleValue, ## s_ImpossibleValue, ## is_r_dict = tp is r_dict) ## if tp is r_dict: ## s_eqfn = self.immutablevalue(x.key_eq) ## s_hashfn = self.immutablevalue(x.key_hash) ## dictdef.dictkey.update_rdict_annotations(s_eqfn, ## s_hashfn) ## for ek, ev in x.iteritems(): ## dictdef.generalize_key(self.annotation_from_example(ek)) ## dictdef.generalize_value(self.annotation_from_example(ev)) 
result = SomeDict() elif tp is types.ModuleType: result = SomeModule() elif callable(x): #if hasattr(x, '__self__') and x.__self__ is not None: # # for cases like 'l.append' where 'l' is a global constant list # s_self = self.immutablevalue(x.__self__, need_const) # result = s_self.find_method(x.__name__) # if result is None: # result = SomeObject() #elif hasattr(x, 'im_self') and hasattr(x, 'im_func'): # # on top of PyPy, for cases like 'l.append' where 'l' is a # # global constant list, the find_method() returns non-None # s_self = self.immutablevalue(x.im_self, need_const) # result = s_self.find_method(x.im_func.__name__) #else: # result = None #if result is None: # if (self.annotator.policy.allow_someobjects # and getattr(x, '__module__', None) == '__builtin__' # # XXX note that the print support functions are __builtin__ # and tp not in (types.FunctionType, types.MethodType)): ## result = SomeObject() # result.knowntype = tp # at least for types this needs to be correct # else: # result = SomePBC([self.getdesc(x)]) if tp is types.BuiltinFunctionType or tp is types.BuiltinMethodType: result = SomeBuiltinFunction() elif hasattr(x, 'im_func'): result = SomeMethod() elif hasattr(x, 'func_code'): result = SomeFunction() elif hasattr(x, '__class__'): if x.__class__ is type: result = SomeClass(x) else: result = SomeInstance(SomeClass(x.__class__)) elif tp is types.ClassType: result = SomeClass(x) elif x is None: return s_None elif hasattr(x, '__class__'): result = SomeInstance(SomeClass(x.__class__)) else: result = SomeObject() # XXX here we might want to consider stuff like # buffer, slice, etc. etc. Let's leave it for now return result def unionof(first, other): return first.unionof(other)
Python
""" This module is keeping track about API informations as well as providing some interface to easily access stored data """ import py import sys import types import inspect from py.__.apigen.tracer.description import FunctionDesc, ClassDesc, \ MethodDesc, Desc from py.__.apigen.tracer import model sorted = py.builtin.sorted def pkg_to_dict(module): defs = module.__package__.exportdefs d = {} for key, value in defs.iteritems(): chain = key.split('.') base = module for elem in chain: base = getattr(base, elem) if value[1] == '*': d.update(get_star_import_tree(base, key)) else: d[key] = base return d def get_star_import_tree(module, modname): """ deal with '*' entries in an initpkg situation """ ret = {} modpath = py.path.local(inspect.getsourcefile(module)) pkgpath = module.__package__.getpath() for objname in dir(module): if objname.startswith('_'): continue # also skip __*__ attributes obj = getattr(module, objname) if (isinstance(obj, types.ClassType) or isinstance(obj, types.ObjectType)): try: sourcefile_object = py.path.local( inspect.getsourcefile(obj)) except TypeError: continue else: if sourcefile_object.strpath != modpath.strpath: # not in this package continue dotted_name = '%s.%s' % (modname, objname) ret[dotted_name] = obj return ret class DocStorage(object): """ Class storing info about API """ def __init__(self): self.module_name = None def consider_call(self, frame, caller_frame, upward_cut_frame=None): assert isinstance(frame, py.code.Frame) desc = self.find_desc(frame.code, frame.raw.f_locals) if desc: self.generalize_args(desc, frame) desc.consider_call_site(caller_frame, upward_cut_frame) desc.consider_start_locals(frame) def generalize_args(self, desc, frame): args = [arg for key, arg in frame.getargs()] #self.call_stack.append((desc, args)) desc.consider_call([model.guess_type(arg) for arg in args]) def generalize_retval(self, desc, arg): desc.consider_return(model.guess_type(arg)) def consider_return(self, frame, arg): assert isinstance(frame, 
py.code.Frame) desc = self.find_desc(frame.code, frame.raw.f_locals) if desc: self.generalize_retval(desc, arg) desc.consider_end_locals(frame) def consider_exception(self, frame, arg): desc = self.find_desc(frame.code, frame.raw.f_locals) if desc: exc_class, value, _ = arg desc.consider_exception(exc_class, value) def find_desc(self, code, locals): try: # argh, very fragile specialcasing return self.desc_cache[(code.raw, locals[code.raw.co_varnames[0]].__class__)] except (KeyError, IndexError, AttributeError): # XXX hrmph return self.desc_cache.get(code.raw, None) #for desc in self.descs.values(): # if desc.has_code(frame.code.raw): # return desc #return None def make_cache(self): self.desc_cache = {} for key, desc in self.descs.iteritems(): self.desc_cache[desc] = desc def from_dict(self, _dict, keep_frames=False, module_name=None): self.module_name = module_name self.descs = {} for key, val in _dict.iteritems(): to_key, to_val = self.make_desc(key, val) if to_key: self.descs[to_key] = to_val self.make_cache() # XXX return self # XXX: This function becomes slowly outdated and even might go away at some # point. The question is whether we want to use tracer.magic or not # at all def add_desc(self, name, value, **kwargs): key = name count = 1 while key in self.descs: key = "%s_%d" % (name, count) count += 1 key, desc = self.make_desc(key, value, **kwargs) if key: self.descs[key] = desc self.desc_cache[desc] = desc return desc else: return None def make_desc(self, key, value, add_desc=True, **kwargs): if isinstance(value, types.FunctionType): desc = FunctionDesc(key, value, **kwargs) elif isinstance(value, (types.ObjectType, types.ClassType)): desc = ClassDesc(key, value, **kwargs) # XXX: This is the special case when we do not have __init__ # in dir(value) for uknown reason. 
Need to investigate it for name in dir(value) + ['__init__']: field = getattr(value, name, None) if isinstance(field, types.MethodType) and \ isinstance(field.im_func, types.FunctionType): real_name = key + '.' + name md = MethodDesc(real_name, field) if add_desc: # XXX hack self.descs[real_name] = md desc.add_method_desc(name, md) # Some other fields as well? elif isinstance(value, types.MethodType): desc = MethodDesc(key, value, **kwargs) else: desc = Desc(value) return (key, desc) # How to do it better? I want a desc to be a key # value, but I cannot get full object if I do a lookup def from_pkg(self, module, keep_frames=False): self.module = module self.from_dict(pkg_to_dict(module), keep_frames, module.__name__) # XXX return self def from_module(self, func): raise NotImplementedError("From module") class AbstractDocStorageAccessor(object): def __init__(self): raise NotImplementedError("Purely virtual object") def get_function_names(self): """ Returning names of all functions """ def get_class_names(self): """ Returning names of all classess """ def get_doc(self, name): """ Returning __doc__ of a function """ def get_function_definition(self, name): """ Returns definition of a function (source) """ def get_function_signature(self, name): """ Returns types of a function """ def get_function_callpoints(self, name): """ Returns list of all callpoints """ def get_module_name(self): pass def get_class_methods(self, name): """ Returns all methods of a class """ #def get_object_info(self, key): # def get_module_info(self): """ Returns module information """ class DocStorageAccessor(AbstractDocStorageAccessor): """ Set of helper functions to access DocStorage, separated in different class to keep abstraction """ def __init__(self, ds): self.ds = ds def _get_names(self, filter): return [i for i, desc in self.ds.descs.iteritems() if filter(i, desc)] def get_function_names(self): return sorted(self._get_names(lambda i, desc: type(desc) is FunctionDesc)) def 
get_class_names(self): return sorted(self._get_names(lambda i, desc: isinstance(desc, ClassDesc))) #def get_function(self, name): # return self.ds.descs[name].pyobj def get_doc(self, name): return self.ds.descs[name].pyobj.__doc__ or "*Not documented*" def get_function_definition(self, name): desc = self.ds.descs[name] assert isinstance(desc, FunctionDesc) code = py.code.Code(desc.code) return code.fullsource[code.firstlineno] def get_function_signature(self, name): desc = self.ds.descs[name] # we return pairs of (name, type) here names = desc.pyobj.func_code.co_varnames[ :desc.pyobj.func_code.co_argcount] types = desc.inputcells return zip(names, types), desc.retval def get_function_source(self, name): desc = self.ds.descs[name] try: return str(py.code.Source(desc.pyobj)) except IOError: return "Cannot get source" def get_function_callpoints(self, name): # return list of tuple (filename, fileline, frame) return self.ds.descs[name].get_call_sites() def get_function_local_changes(self, name): return self.ds.descs[name].get_local_changes() def get_function_exceptions(self, name): return sorted([i.__name__ for i in self.ds.descs[name].exceptions.keys()]) def get_module_name(self): if self.ds.module_name is not None: return self.ds.module_name elif hasattr(self.ds, 'module'): return self.ds.module.__name__ return "Unknown module" def get_class_methods(self, name): desc = self.ds.descs[name] assert isinstance(desc, ClassDesc) return sorted(desc.getfields()) def get_module_info(self): module = getattr(self.ds, 'module', None) if module is None: return "Lack of module info" try: retval = module.__doc__ or "*undocumented*" retval = module.__package__.description retval = module.__package__.long_description except AttributeError: pass return retval def get_type_desc(self, _type): # XXX We provide only classes here if not isinstance(_type, model.SomeClass): return None # XXX we might want to cache it at some point for key, desc in self.ds.descs.iteritems(): if desc.pyobj == 
_type.cls: return key, 'class', desc.is_degenerated return None def get_method_origin(self, name): method = self.ds.descs[name].pyobj cls = method.im_class if not cls.__bases__: return self.desc_from_pyobj(cls, cls.__name__) curr = cls while curr: for base in curr.__bases__: basefunc = getattr(base, method.im_func.func_name, None) if (basefunc is not None and hasattr(basefunc, 'im_func') and hasattr(basefunc.im_func, 'func_code') and basefunc.im_func.func_code is method.im_func.func_code): curr = base break else: break return self.desc_from_pyobj(curr, curr.__name__) def get_possible_base_classes(self, name): cls = self.ds.descs[name].pyobj if not hasattr(cls, '__bases__'): return [] retval = [] for base in cls.__bases__: desc = self.desc_from_pyobj(base, base.__name__) if desc is not None: retval.append(desc) return retval def desc_from_pyobj(self, pyobj, name): for desc in self.ds.descs.values(): if isinstance(desc, ClassDesc) and desc.pyobj is pyobj: return desc # otherwise create empty desc key, desc = self.ds.make_desc(name, pyobj, False) #self.ds.descs[key] = desc desc.is_degenerated = True # and make sure we'll not try to link to it directly return desc def get_obj(self, name): return self.ds.descs[name].pyobj
Python
def cut_pyc(f_name):
    """Map a compiled-module filename to its source filename.

    Strips the trailing 'c' from a '.pyc' path ('foo.pyc' -> 'foo.py');
    any other path is returned unchanged.
    """
    return f_name[:-1] if f_name.endswith('.pyc') else f_name
Python
# Two tiny sample functions (presumably fixtures for the tracer tests).

def foo(x):
    # add one to the argument
    incremented = x + 1
    return incremented


def bar(x):
    # add two to the argument
    incremented = x + 2
    return incremented
Python